crypto: aead - Remove CRYPTO_ALG_AEAD_NEW flag
[deliverable/linux.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
5af50730
RH
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
9c4a7965
KP
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
5a0e3ad6 42#include <linux/slab.h>
9c4a7965
KP
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
3952f17e 46#include <crypto/des.h>
9c4a7965 47#include <crypto/sha.h>
497f2e6b 48#include <crypto/md5.h>
e98014ab 49#include <crypto/internal/aead.h>
9c4a7965 50#include <crypto/authenc.h>
4de9d0b5 51#include <crypto/skcipher.h>
acbf7c62
LN
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
4de9d0b5 54#include <crypto/scatterwalk.h>
9c4a7965
KP
55
56#include "talitos.h"
57
922f9dc8
LC
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
922f9dc8
LC
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
64}
65
42e8b0d7 66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
922f9dc8 67 bool is_sec1)
538caf83 68{
922f9dc8
LC
69 if (is_sec1) {
70 ptr->res = 0;
71 ptr->len1 = cpu_to_be16(len);
72 } else {
73 ptr->len = cpu_to_be16(len);
74 }
538caf83
LC
75}
76
922f9dc8
LC
77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
78 bool is_sec1)
538caf83 79{
922f9dc8
LC
80 if (is_sec1)
81 return be16_to_cpu(ptr->len1);
82 else
83 return be16_to_cpu(ptr->len);
538caf83
LC
84}
85
922f9dc8 86static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
185eb79f 87{
922f9dc8
LC
88 if (!is_sec1)
89 ptr->j_extent = 0;
185eb79f
LC
90}
91
9c4a7965
KP
92/*
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 */
95static void map_single_talitos_ptr(struct device *dev,
edc6bd69 96 struct talitos_ptr *ptr,
42e8b0d7 97 unsigned int len, void *data,
9c4a7965
KP
98 enum dma_data_direction dir)
99{
81eb024c 100 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
922f9dc8
LC
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
81eb024c 103
922f9dc8
LC
104 to_talitos_ptr_len(ptr, len, is_sec1);
105 to_talitos_ptr(ptr, dma_addr, is_sec1);
106 to_talitos_ptr_extent_clear(ptr, is_sec1);
9c4a7965
KP
107}
108
109/*
110 * unmap bus single (contiguous) h/w descriptor pointer
111 */
112static void unmap_single_talitos_ptr(struct device *dev,
edc6bd69 113 struct talitos_ptr *ptr,
9c4a7965
KP
114 enum dma_data_direction dir)
115{
922f9dc8
LC
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
118
edc6bd69 119 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
922f9dc8 120 from_talitos_ptr_len(ptr, is_sec1), dir);
9c4a7965
KP
121}
122
/*
 * Reset channel @ch and wait for the reset bit to self-clear.
 * SEC1 and SEC2+ expose the reset through different registers/bits.
 * Returns 0 on success, -EIO if the reset did not complete within
 * TALITOS_TIMEOUT polls.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* SEC1: reset bit lives in CCCR_LO */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		/* SEC2+: reset bit lives in CCCR */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
161
/*
 * Issue a software reset of the whole SEC block and wait for the
 * SWR bit to self-clear.  When a second IRQ line is in use, also
 * re-route channels 1 and 3 to it afterwards (RCA1/RCA3).
 * Returns 0 on success, -EIO on reset timeout.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		/* route channels 1 and 3 to the secondary interrupt line */
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
187
/*
 * Reset and initialize the device
 *
 * Performs a double master reset (per errata), resets every channel,
 * then programs the interrupt masks for the detected SEC generation.
 * Returns 0 on success or the first error from reset_device()/
 * reset_channel().
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		/* SEC1 IMR bits are cleared (not set) to enable interrupts */
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
236
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Return: -EINPROGRESS on successful submission, -EAGAIN if the
 * channel's request fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 uses hdr1: mirror hdr there and map from that offset */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish request fields before desc marks the slot in-use */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
305
/*
 * process what was done, notify callback of error if not
 *
 * Walks channel @ch's fifo from the tail, completing each descriptor:
 * a descriptor with its DONE bit set completes with status 0; otherwise
 * it completes with @error (or the walk stops if @error is 0).
 * Callbacks are invoked with the tail lock dropped.  In the
 * single-descriptor error case (no channel reset pending) the walk
 * returns early so the channel may resume processing.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
368
369/*
370 * process completed requests for channels that have done status
371 */
dd3c0987
LC
372#define DEF_TALITOS1_DONE(name, ch_done_mask) \
373static void talitos1_done_##name(unsigned long data) \
374{ \
375 struct device *dev = (struct device *)data; \
376 struct talitos_private *priv = dev_get_drvdata(dev); \
377 unsigned long flags; \
378 \
379 if (ch_done_mask & 0x10000000) \
380 flush_channel(dev, 0, 0, 0); \
381 if (priv->num_channels == 1) \
382 goto out; \
383 if (ch_done_mask & 0x40000000) \
384 flush_channel(dev, 1, 0, 0); \
385 if (ch_done_mask & 0x00010000) \
386 flush_channel(dev, 2, 0, 0); \
387 if (ch_done_mask & 0x00040000) \
388 flush_channel(dev, 3, 0, 0); \
389 \
390out: \
391 /* At this point, all completed channels have been processed */ \
392 /* Unmask done interrupts for channels completed later on. */ \
393 spin_lock_irqsave(&priv->reg_lock, flags); \
394 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
395 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
396 spin_unlock_irqrestore(&priv->reg_lock, flags); \
397}
398
399DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
400
/*
 * SEC2+ done tasklet: flushes each channel whose done bit is set in
 * ch_done_mask (bits 0, 2, 4, 6 for channels 0-3), then re-enables
 * (unmasks) done interrupts by *setting* the IMR bits.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
9c4a7965
KP
431
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current descriptor pointer (CDPR, 64-bit on
 * SEC2+), then searches the s/w fifo from the tail for the request
 * with that bus address.  Returns that descriptor's header, or 0 if
 * CDPR is NULL or no matching fifo entry is found.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			/* wrapped all the way around without a match */
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
462
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Dumps the interrupt status registers of the execution unit(s)
 * selected by the primary (SEL0) and secondary (SEL1) fields of
 * @desc_hdr, followed by the channel's descriptor buffer.  A zero
 * @desc_hdr falls back to the header snapshot in the channel's
 * DESCBUF register.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/* NOTE(review): reads reg_pkeu here, not a reg_keu — confirm
		 * against the register map whether KEU shares PKEU's window */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the eight 64-bit words of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
537
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode and log the channel pointer
 * status (CCPSR_LO), flush the channel's fifo with the derived error,
 * then either reset the channel or (SEC2+ only) ask it to continue.
 * If recovery fails, or error bits beyond the per-channel ones are
 * pending, purge all queues and reinitialize the whole device.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			/* gather/scatter status bits exist only on SEC2+ */
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2+: request continuation and poll for it */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
636
/*
 * SEC1 hard-irq handler: snapshot and acknowledge ISR/ISR_LO under the
 * register lock; on error bits, drop the lock and run recovery; on done
 * bits, mask further done interrupts (SEC1: by *setting* IMR bits) and
 * defer completion processing to the done tasklet.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
671
/*
 * SEC2+ hard-irq handler: same structure as the SEC1 variant, but done
 * interrupts are masked by *clearing* IMR bits, and any non-zero
 * ISR_LO is treated as an error condition.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
9c4a7965
KP
710
711/*
712 * hwrng
713 */
714static int talitos_rng_data_present(struct hwrng *rng, int wait)
715{
716 struct device *dev = (struct device *)rng->priv;
717 struct talitos_private *priv = dev_get_drvdata(dev);
718 u32 ofl;
719 int i;
720
721 for (i = 0; i < 20; i++) {
5fa7fa14 722 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
9c4a7965
KP
723 TALITOS_RNGUSR_LO_OFL;
724 if (ofl || !wait)
725 break;
726 udelay(10);
727 }
728
729 return !!ofl;
730}
731
/* Read one 32-bit random sample from the RNGU output FIFO. */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);	/* upper word, overwritten below */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);	/* lower word is returned */

	return sizeof(u32);
}
743
/*
 * Reset the RNGU execution unit, wait for reset-done, then kick off
 * generation.  Returns 0 on success, -ENODEV on reset timeout.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
765
766static int talitos_register_rng(struct device *dev)
767{
768 struct talitos_private *priv = dev_get_drvdata(dev);
35a3bb3d 769 int err;
9c4a7965
KP
770
771 priv->rng.name = dev_driver_string(dev),
772 priv->rng.init = talitos_rng_init,
773 priv->rng.data_present = talitos_rng_data_present,
774 priv->rng.data_read = talitos_rng_data_read,
775 priv->rng.priv = (unsigned long)dev;
776
35a3bb3d
AS
777 err = hwrng_register(&priv->rng);
778 if (!err)
779 priv->rng_registered = true;
780
781 return err;
9c4a7965
KP
782}
783
784static void talitos_unregister_rng(struct device *dev)
785{
786 struct talitos_private *priv = dev_get_drvdata(dev);
787
35a3bb3d
AS
788 if (!priv->rng_registered)
789 return;
790
9c4a7965 791 hwrng_unregister(&priv->rng);
35a3bb3d 792 priv->rng_registered = false;
9c4a7965
KP
793}
794
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/* per-transform (tfm) context for talitos algorithms */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	int ch;				/* channel assigned to this tfm */
	__be32 desc_hdr_template;	/* pre-built descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total = authkeylen + enckeylen */
	unsigned int enckeylen;
	unsigned int authkeylen;
};
812
497f2e6b
LN
813#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
814#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
815
816struct talitos_ahash_req_ctx {
60f208d7 817 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
497f2e6b
LN
818 unsigned int hw_context_size;
819 u8 buf[HASH_MAX_BLOCK_SIZE];
820 u8 bufnext[HASH_MAX_BLOCK_SIZE];
60f208d7 821 unsigned int swinit;
497f2e6b
LN
822 unsigned int first;
823 unsigned int last;
824 unsigned int to_hash_later;
42e8b0d7 825 unsigned int nbuf;
497f2e6b
LN
826 struct scatterlist bufsl[2];
827 struct scatterlist *psrc;
828};
829
56af8cd4
LN
830static int aead_setkey(struct crypto_aead *authenc,
831 const u8 *key, unsigned int keylen)
9c4a7965
KP
832{
833 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 834 struct crypto_authenc_keys keys;
9c4a7965 835
c306a98d 836 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
9c4a7965
KP
837 goto badkey;
838
c306a98d 839 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
9c4a7965
KP
840 goto badkey;
841
c306a98d
MK
842 memcpy(ctx->key, keys.authkey, keys.authkeylen);
843 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 844
c306a98d
MK
845 ctx->keylen = keys.authkeylen + keys.enckeylen;
846 ctx->enckeylen = keys.enckeylen;
847 ctx->authkeylen = keys.authkeylen;
9c4a7965
KP
848
849 return 0;
850
851badkey:
852 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
853 return -EINVAL;
854}
855
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* trailing storage: SEC2 uses link tables, SEC1 a flat buffer */
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
889
4de9d0b5
LN
890static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
891 unsigned int nents, enum dma_data_direction dir,
2a1cfe46 892 bool chained)
4de9d0b5
LN
893{
894 if (unlikely(chained))
895 while (sg) {
896 dma_map_sg(dev, sg, 1, dir);
5be4d4c9 897 sg = sg_next(sg);
4de9d0b5
LN
898 }
899 else
900 dma_map_sg(dev, sg, nents, dir);
901 return nents;
902}
903
904static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
905 enum dma_data_direction dir)
906{
907 while (sg) {
908 dma_unmap_sg(dev, sg, 1, dir);
5be4d4c9 909 sg = sg_next(sg);
4de9d0b5
LN
910 }
911}
912
913static void talitos_sg_unmap(struct device *dev,
914 struct talitos_edesc *edesc,
915 struct scatterlist *src,
916 struct scatterlist *dst)
917{
918 unsigned int src_nents = edesc->src_nents ? : 1;
919 unsigned int dst_nents = edesc->dst_nents ? : 1;
920
921 if (src != dst) {
2a1cfe46 922 if (edesc->src_chained)
4de9d0b5
LN
923 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
924 else
925 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
926
497f2e6b 927 if (dst) {
2a1cfe46 928 if (edesc->dst_chained)
497f2e6b
LN
929 talitos_unmap_sg_chain(dev, dst,
930 DMA_FROM_DEVICE);
931 else
932 dma_unmap_sg(dev, dst, dst_nents,
933 DMA_FROM_DEVICE);
934 }
4de9d0b5 935 } else
2a1cfe46 936 if (edesc->src_chained)
4de9d0b5
LN
937 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
938 else
939 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
940}
941
/*
 * Undo all DMA mappings set up for an ipsec_esp descriptor:
 * the single-buffer pointer slots (6, 3, 2, 0), the src/dst
 * scatterlists, and the link table if one was allocated.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
957
958/*
959 * ipsec_esp descriptor callbacks
960 */
961static void ipsec_esp_encrypt_done(struct device *dev,
962 struct talitos_desc *desc, void *context,
963 int err)
964{
965 struct aead_request *areq = context;
9c4a7965 966 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 967 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 968 struct talitos_edesc *edesc;
9c4a7965
KP
969 struct scatterlist *sg;
970 void *icvdata;
971
19bbbc63
KP
972 edesc = container_of(desc, struct talitos_edesc, desc);
973
9c4a7965
KP
974 ipsec_esp_unmap(dev, edesc, areq);
975
976 /* copy the generated ICV to dst */
aeb4c132 977 if (edesc->icv_ool) {
9c4a7965 978 icvdata = &edesc->link_tbl[edesc->src_nents +
aeb4c132 979 edesc->dst_nents + 2];
9c4a7965 980 sg = sg_last(areq->dst, edesc->dst_nents);
aeb4c132
HX
981 memcpy((char *)sg_virt(sg) + sg->length - authsize,
982 icvdata, authsize);
9c4a7965
KP
983 }
984
985 kfree(edesc);
986
987 aead_request_complete(areq, err);
988}
989
fe5720e2 990static void ipsec_esp_decrypt_swauth_done(struct device *dev,
e938e465
KP
991 struct talitos_desc *desc,
992 void *context, int err)
9c4a7965
KP
993{
994 struct aead_request *req = context;
9c4a7965 995 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 996 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 997 struct talitos_edesc *edesc;
9c4a7965 998 struct scatterlist *sg;
aeb4c132 999 char *oicv, *icv;
9c4a7965 1000
19bbbc63
KP
1001 edesc = container_of(desc, struct talitos_edesc, desc);
1002
9c4a7965
KP
1003 ipsec_esp_unmap(dev, edesc, req);
1004
1005 if (!err) {
1006 /* auth check */
9c4a7965 1007 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
aeb4c132
HX
1008 icv = (char *)sg_virt(sg) + sg->length - authsize;
1009
1010 if (edesc->dma_len) {
1011 oicv = (char *)&edesc->link_tbl[edesc->src_nents +
1012 edesc->dst_nents + 2];
1013 if (edesc->icv_ool)
1014 icv = oicv + authsize;
1015 } else
1016 oicv = (char *)&edesc->link_tbl[0];
1017
1018 err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
9c4a7965
KP
1019 }
1020
1021 kfree(edesc);
1022
1023 aead_request_complete(req, err);
1024}
1025
fe5720e2 1026static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
e938e465
KP
1027 struct talitos_desc *desc,
1028 void *context, int err)
fe5720e2
KP
1029{
1030 struct aead_request *req = context;
19bbbc63
KP
1031 struct talitos_edesc *edesc;
1032
1033 edesc = container_of(desc, struct talitos_edesc, desc);
fe5720e2
KP
1034
1035 ipsec_esp_unmap(dev, edesc, req);
1036
1037 /* check ICV auth status */
e938e465
KP
1038 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1039 DESC_HDR_LO_ICCR1_PASS))
1040 err = -EBADMSG;
fe5720e2
KP
1041
1042 kfree(edesc);
1043
1044 aead_request_complete(req, err);
1045}
1046
9c4a7965
KP
1047/*
1048 * convert scatterlist to SEC h/w link table format
1049 * stop at cryptlen bytes
1050 */
aeb4c132
HX
1051static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1052 unsigned int offset, int cryptlen,
1053 struct talitos_ptr *link_tbl_ptr)
9c4a7965 1054{
70bcaca7 1055 int n_sg = sg_count;
aeb4c132 1056 int count = 0;
70bcaca7 1057
aeb4c132
HX
1058 while (cryptlen && sg && n_sg--) {
1059 unsigned int len = sg_dma_len(sg);
9c4a7965 1060
aeb4c132
HX
1061 if (offset >= len) {
1062 offset -= len;
1063 goto next;
1064 }
1065
1066 len -= offset;
1067
1068 if (len > cryptlen)
1069 len = cryptlen;
1070
1071 to_talitos_ptr(link_tbl_ptr + count,
1072 sg_dma_address(sg) + offset, 0);
1073 link_tbl_ptr[count].len = cpu_to_be16(len);
1074 link_tbl_ptr[count].j_extent = 0;
1075 count++;
1076 cryptlen -= len;
1077 offset = 0;
1078
1079next:
1080 sg = sg_next(sg);
70bcaca7 1081 }
9c4a7965
KP
1082
1083 /* tag end of link table */
aeb4c132
HX
1084 if (count > 0)
1085 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
70bcaca7 1086
aeb4c132
HX
1087 return count;
1088}
1089
/* build a link table for the whole of @sg, i.e. with no leading skip */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, link_tbl_ptr);
}
1097
1098/*
1099 * fill in and submit ipsec_esp descriptor
1100 */
56af8cd4 1101static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
aeb4c132
HX
1102 void (*callback)(struct device *dev,
1103 struct talitos_desc *desc,
1104 void *context, int error))
9c4a7965
KP
1105{
1106 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
aeb4c132 1107 unsigned int authsize = crypto_aead_authsize(aead);
9c4a7965
KP
1108 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1109 struct device *dev = ctx->dev;
1110 struct talitos_desc *desc = &edesc->desc;
1111 unsigned int cryptlen = areq->cryptlen;
e41256f1 1112 unsigned int ivsize = crypto_aead_ivsize(aead);
aeb4c132 1113 int tbl_off = 0;
fa86a267 1114 int sg_count, ret;
fe5720e2 1115 int sg_link_tbl_len;
9c4a7965
KP
1116
1117 /* hmac key */
1118 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
a2b35aa8 1119 DMA_TO_DEVICE);
79fd31d3 1120
aeb4c132
HX
1121 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1122 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1123 : DMA_TO_DEVICE,
1124 edesc->src_chained);
1125
9c4a7965 1126 /* hmac data */
aeb4c132
HX
1127 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1128 if (sg_count > 1 &&
1129 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1130 areq->assoclen,
1131 &edesc->link_tbl[tbl_off])) > 1) {
1132 tbl_off += ret;
79fd31d3
HG
1133
1134 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
922f9dc8 1135 sizeof(struct talitos_ptr), 0);
79fd31d3
HG
1136 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1137
79fd31d3
HG
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, DMA_BIDIRECTIONAL);
1140 } else {
aeb4c132 1141 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
79fd31d3
HG
1142 desc->ptr[1].j_extent = 0;
1143 }
1144
9c4a7965 1145 /* cipher iv */
922f9dc8 1146 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
79fd31d3
HG
1147 desc->ptr[2].len = cpu_to_be16(ivsize);
1148 desc->ptr[2].j_extent = 0;
9c4a7965
KP
1149
1150 /* cipher key */
1151 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
a2b35aa8 1152 (char *)&ctx->key + ctx->authkeylen,
9c4a7965
KP
1153 DMA_TO_DEVICE);
1154
1155 /*
1156 * cipher in
1157 * map and adjust cipher len to aead request cryptlen.
1158 * extent is bytes of HMAC postpended to ciphertext,
1159 * typically 12 for ipsec
1160 */
1161 desc->ptr[4].len = cpu_to_be16(cryptlen);
1162 desc->ptr[4].j_extent = authsize;
1163
aeb4c132
HX
1164 sg_link_tbl_len = cryptlen;
1165 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1166 sg_link_tbl_len += authsize;
1167
1168 if (sg_count > 1 &&
1169 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
1170 sg_link_tbl_len,
1171 &edesc->link_tbl[tbl_off])) > 1) {
1172 tbl_off += ret;
1173 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1174 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1175 tbl_off *
1176 sizeof(struct talitos_ptr), 0);
1177 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1178 edesc->dma_len,
1179 DMA_BIDIRECTIONAL);
1180 } else
922f9dc8 1181 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
9c4a7965
KP
1182
1183 /* cipher out */
1184 desc->ptr[5].len = cpu_to_be16(cryptlen);
1185 desc->ptr[5].j_extent = authsize;
1186
e938e465 1187 if (areq->src != areq->dst)
4de9d0b5
LN
1188 sg_count = talitos_map_sg(dev, areq->dst,
1189 edesc->dst_nents ? : 1,
2a1cfe46 1190 DMA_FROM_DEVICE, edesc->dst_chained);
9c4a7965 1191
aeb4c132
HX
1192 edesc->icv_ool = false;
1193
1194 if (sg_count > 1 &&
1195 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
1196 areq->assoclen, cryptlen,
1197 &edesc->link_tbl[tbl_off])) >
1198 1) {
79fd31d3 1199 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
9c4a7965 1200
81eb024c 1201 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
922f9dc8 1202 tbl_off * sizeof(struct talitos_ptr), 0);
fe5720e2 1203
f3c85bc1 1204 /* Add an entry to the link table for ICV data */
79fd31d3
HG
1205 tbl_ptr += sg_count - 1;
1206 tbl_ptr->j_extent = 0;
1207 tbl_ptr++;
1208 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1209 tbl_ptr->len = cpu_to_be16(authsize);
9c4a7965
KP
1210
1211 /* icv data follows link tables */
79fd31d3 1212 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
aeb4c132
HX
1213 (edesc->src_nents + edesc->dst_nents +
1214 2) * sizeof(struct talitos_ptr) +
1215 authsize, 0);
9c4a7965
KP
1216 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1217 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1218 edesc->dma_len, DMA_BIDIRECTIONAL);
aeb4c132
HX
1219
1220 edesc->icv_ool = true;
1221 } else
1222 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
9c4a7965
KP
1223
1224 /* iv out */
a2b35aa8 1225 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
9c4a7965
KP
1226 DMA_FROM_DEVICE);
1227
5228f0f7 1228 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
fa86a267
KP
1229 if (ret != -EINPROGRESS) {
1230 ipsec_esp_unmap(dev, edesc, areq);
1231 kfree(edesc);
1232 }
1233 return ret;
9c4a7965
KP
1234}
1235
9c4a7965
KP
1236/*
1237 * derive number of elements in scatterlist
1238 */
2a1cfe46 1239static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
9c4a7965
KP
1240{
1241 struct scatterlist *sg = sg_list;
1242 int sg_nents = 0;
1243
2a1cfe46 1244 *chained = false;
bde9079f 1245 while (nbytes > 0 && sg) {
9c4a7965
KP
1246 sg_nents++;
1247 nbytes -= sg->length;
4de9d0b5 1248 if (!sg_is_last(sg) && (sg + 1)->length == 0)
2a1cfe46 1249 *chained = true;
5be4d4c9 1250 sg = sg_next(sg);
9c4a7965
KP
1251 }
1252
1253 return sg_nents;
1254}
1255
1256/*
56af8cd4 1257 * allocate and map the extended descriptor
9c4a7965 1258 */
4de9d0b5
LN
1259static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1260 struct scatterlist *src,
1261 struct scatterlist *dst,
79fd31d3
HG
1262 u8 *iv,
1263 unsigned int assoclen,
4de9d0b5
LN
1264 unsigned int cryptlen,
1265 unsigned int authsize,
79fd31d3 1266 unsigned int ivsize,
4de9d0b5 1267 int icv_stashing,
62293a37
HG
1268 u32 cryptoflags,
1269 bool encrypt)
9c4a7965 1270{
56af8cd4 1271 struct talitos_edesc *edesc;
aeb4c132
HX
1272 int src_nents, dst_nents, alloc_len, dma_len;
1273 bool src_chained = false, dst_chained = false;
79fd31d3 1274 dma_addr_t iv_dma = 0;
4de9d0b5 1275 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1276 GFP_ATOMIC;
6f65f6ac
LC
1277 struct talitos_private *priv = dev_get_drvdata(dev);
1278 bool is_sec1 = has_ftr_sec1(priv);
1279 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
9c4a7965 1280
6f65f6ac 1281 if (cryptlen + authsize > max_len) {
4de9d0b5 1282 dev_err(dev, "length exceeds h/w max limit\n");
9c4a7965
KP
1283 return ERR_PTR(-EINVAL);
1284 }
1285
935e99a3 1286 if (ivsize)
79fd31d3
HG
1287 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1288
62293a37 1289 if (!dst || dst == src) {
aeb4c132
HX
1290 src_nents = sg_count(src, assoclen + cryptlen + authsize,
1291 &src_chained);
62293a37
HG
1292 src_nents = (src_nents == 1) ? 0 : src_nents;
1293 dst_nents = dst ? src_nents : 0;
1294 } else { /* dst && dst != src*/
aeb4c132
HX
1295 src_nents = sg_count(src, assoclen + cryptlen +
1296 (encrypt ? 0 : authsize),
62293a37
HG
1297 &src_chained);
1298 src_nents = (src_nents == 1) ? 0 : src_nents;
aeb4c132
HX
1299 dst_nents = sg_count(dst, assoclen + cryptlen +
1300 (encrypt ? authsize : 0),
62293a37
HG
1301 &dst_chained);
1302 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
9c4a7965
KP
1303 }
1304
1305 /*
1306 * allocate space for base edesc plus the link tables,
aeb4c132
HX
1307 * allowing for two separate entries for AD and generated ICV (+ 2),
1308 * and space for two sets of ICVs (stashed and generated)
9c4a7965 1309 */
56af8cd4 1310 alloc_len = sizeof(struct talitos_edesc);
aeb4c132 1311 if (src_nents || dst_nents) {
6f65f6ac 1312 if (is_sec1)
608f37d0
DC
1313 dma_len = (src_nents ? cryptlen : 0) +
1314 (dst_nents ? cryptlen : 0);
6f65f6ac 1315 else
aeb4c132
HX
1316 dma_len = (src_nents + dst_nents + 2) *
1317 sizeof(struct talitos_ptr) + authsize * 2;
9c4a7965
KP
1318 alloc_len += dma_len;
1319 } else {
1320 dma_len = 0;
4de9d0b5 1321 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1322 }
1323
586725f8 1324 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1325 if (!edesc) {
79fd31d3
HG
1326 if (iv_dma)
1327 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
935e99a3 1328
4de9d0b5 1329 dev_err(dev, "could not allocate edescriptor\n");
9c4a7965
KP
1330 return ERR_PTR(-ENOMEM);
1331 }
1332
1333 edesc->src_nents = src_nents;
1334 edesc->dst_nents = dst_nents;
2a1cfe46
HG
1335 edesc->src_chained = src_chained;
1336 edesc->dst_chained = dst_chained;
79fd31d3 1337 edesc->iv_dma = iv_dma;
9c4a7965 1338 edesc->dma_len = dma_len;
497f2e6b
LN
1339 if (dma_len)
1340 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1341 edesc->dma_len,
1342 DMA_BIDIRECTIONAL);
9c4a7965
KP
1343
1344 return edesc;
1345}
1346
79fd31d3 1347static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1348 int icv_stashing, bool encrypt)
4de9d0b5
LN
1349{
1350 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 1351 unsigned int authsize = crypto_aead_authsize(authenc);
4de9d0b5 1352 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1353 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1354
aeb4c132 1355 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1356 iv, areq->assoclen, areq->cryptlen,
aeb4c132 1357 authsize, ivsize, icv_stashing,
62293a37 1358 areq->base.flags, encrypt);
4de9d0b5
LN
1359}
1360
56af8cd4 1361static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1362{
1363 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1364 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1365 struct talitos_edesc *edesc;
9c4a7965
KP
1366
1367 /* allocate extended descriptor */
62293a37 1368 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1369 if (IS_ERR(edesc))
1370 return PTR_ERR(edesc);
1371
1372 /* set encrypt */
70bcaca7 1373 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1374
aeb4c132 1375 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
9c4a7965
KP
1376}
1377
56af8cd4 1378static int aead_decrypt(struct aead_request *req)
9c4a7965
KP
1379{
1380 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 1381 unsigned int authsize = crypto_aead_authsize(authenc);
9c4a7965 1382 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
fe5720e2 1383 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
56af8cd4 1384 struct talitos_edesc *edesc;
9c4a7965
KP
1385 struct scatterlist *sg;
1386 void *icvdata;
1387
1388 req->cryptlen -= authsize;
1389
1390 /* allocate extended descriptor */
62293a37 1391 edesc = aead_edesc_alloc(req, req->iv, 1, false);
9c4a7965
KP
1392 if (IS_ERR(edesc))
1393 return PTR_ERR(edesc);
1394
fe5720e2 1395 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
e938e465
KP
1396 ((!edesc->src_nents && !edesc->dst_nents) ||
1397 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
9c4a7965 1398
fe5720e2 1399 /* decrypt and check the ICV */
e938e465
KP
1400 edesc->desc.hdr = ctx->desc_hdr_template |
1401 DESC_HDR_DIR_INBOUND |
fe5720e2 1402 DESC_HDR_MODE1_MDEU_CICV;
9c4a7965 1403
fe5720e2
KP
1404 /* reset integrity check result bits */
1405 edesc->desc.hdr_lo = 0;
9c4a7965 1406
aeb4c132 1407 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
e938e465 1408 }
fe5720e2 1409
e938e465
KP
1410 /* Have to check the ICV with software */
1411 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
fe5720e2 1412
e938e465
KP
1413 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1414 if (edesc->dma_len)
aeb4c132
HX
1415 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1416 edesc->dst_nents + 2];
e938e465
KP
1417 else
1418 icvdata = &edesc->link_tbl[0];
fe5720e2 1419
e938e465 1420 sg = sg_last(req->src, edesc->src_nents ? : 1);
fe5720e2 1421
aeb4c132 1422 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
9c4a7965 1423
aeb4c132 1424 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
9c4a7965
KP
1425}
1426
4de9d0b5
LN
1427static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1428 const u8 *key, unsigned int keylen)
1429{
1430 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1431
1432 memcpy(&ctx->key, key, keylen);
1433 ctx->keylen = keylen;
1434
1435 return 0;
4de9d0b5
LN
1436}
1437
032d197e
LC
1438static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1439 struct scatterlist *dst, unsigned int len,
1440 struct talitos_edesc *edesc)
1441{
6f65f6ac
LC
1442 struct talitos_private *priv = dev_get_drvdata(dev);
1443 bool is_sec1 = has_ftr_sec1(priv);
1444
1445 if (is_sec1) {
1446 if (!edesc->src_nents) {
1447 dma_unmap_sg(dev, src, 1,
1448 dst != src ? DMA_TO_DEVICE
1449 : DMA_BIDIRECTIONAL);
1450 }
1451 if (dst && edesc->dst_nents) {
1452 dma_sync_single_for_device(dev,
1453 edesc->dma_link_tbl + len,
1454 len, DMA_FROM_DEVICE);
1455 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1456 edesc->buf + len, len);
1457 } else if (dst && dst != src) {
1458 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1459 }
1460 } else {
1461 talitos_sg_unmap(dev, edesc, src, dst);
1462 }
032d197e
LC
1463}
1464
4de9d0b5
LN
1465static void common_nonsnoop_unmap(struct device *dev,
1466 struct talitos_edesc *edesc,
1467 struct ablkcipher_request *areq)
1468{
1469 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
032d197e
LC
1470
1471 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
4de9d0b5
LN
1472 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1473 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1474
4de9d0b5
LN
1475 if (edesc->dma_len)
1476 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1477 DMA_BIDIRECTIONAL);
1478}
1479
1480static void ablkcipher_done(struct device *dev,
1481 struct talitos_desc *desc, void *context,
1482 int err)
1483{
1484 struct ablkcipher_request *areq = context;
19bbbc63
KP
1485 struct talitos_edesc *edesc;
1486
1487 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1488
1489 common_nonsnoop_unmap(dev, edesc, areq);
1490
1491 kfree(edesc);
1492
1493 areq->base.complete(&areq->base, err);
1494}
1495
032d197e
LC
1496int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1497 unsigned int len, struct talitos_edesc *edesc,
1498 enum dma_data_direction dir, struct talitos_ptr *ptr)
1499{
1500 int sg_count;
922f9dc8
LC
1501 struct talitos_private *priv = dev_get_drvdata(dev);
1502 bool is_sec1 = has_ftr_sec1(priv);
032d197e 1503
922f9dc8 1504 to_talitos_ptr_len(ptr, len, is_sec1);
032d197e 1505
6f65f6ac
LC
1506 if (is_sec1) {
1507 sg_count = edesc->src_nents ? : 1;
032d197e 1508
6f65f6ac
LC
1509 if (sg_count == 1) {
1510 dma_map_sg(dev, src, 1, dir);
1511 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e 1512 } else {
6f65f6ac
LC
1513 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1514 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1515 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1516 len, DMA_TO_DEVICE);
1517 }
1518 } else {
1519 to_talitos_ptr_extent_clear(ptr, is_sec1);
1520
1521 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1522 edesc->src_chained);
1523
1524 if (sg_count == 1) {
922f9dc8 1525 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
6f65f6ac
LC
1526 } else {
1527 sg_count = sg_to_link_tbl(src, sg_count, len,
1528 &edesc->link_tbl[0]);
1529 if (sg_count > 1) {
1530 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1531 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1532 dma_sync_single_for_device(dev,
1533 edesc->dma_link_tbl,
1534 edesc->dma_len,
1535 DMA_BIDIRECTIONAL);
1536 } else {
1537 /* Only one segment now, so no link tbl needed*/
1538 to_talitos_ptr(ptr, sg_dma_address(src),
1539 is_sec1);
1540 }
032d197e
LC
1541 }
1542 }
1543 return sg_count;
1544}
1545
1546void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1547 unsigned int len, struct talitos_edesc *edesc,
1548 enum dma_data_direction dir,
1549 struct talitos_ptr *ptr, int sg_count)
1550{
922f9dc8
LC
1551 struct talitos_private *priv = dev_get_drvdata(dev);
1552 bool is_sec1 = has_ftr_sec1(priv);
1553
032d197e
LC
1554 if (dir != DMA_NONE)
1555 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1556 dir, edesc->dst_chained);
1557
6f65f6ac
LC
1558 to_talitos_ptr_len(ptr, len, is_sec1);
1559
1560 if (is_sec1) {
1561 if (sg_count == 1) {
1562 if (dir != DMA_NONE)
1563 dma_map_sg(dev, dst, 1, dir);
1564 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1565 } else {
1566 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1567 dma_sync_single_for_device(dev,
1568 edesc->dma_link_tbl + len,
1569 len, DMA_FROM_DEVICE);
1570 }
032d197e 1571 } else {
6f65f6ac
LC
1572 to_talitos_ptr_extent_clear(ptr, is_sec1);
1573
1574 if (sg_count == 1) {
1575 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1576 } else {
1577 struct talitos_ptr *link_tbl_ptr =
1578 &edesc->link_tbl[edesc->src_nents + 1];
1579
1580 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1581 (edesc->src_nents + 1) *
1582 sizeof(struct talitos_ptr), 0);
1583 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
42e8b0d7 1584 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
6f65f6ac
LC
1585 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1586 edesc->dma_len,
1587 DMA_BIDIRECTIONAL);
1588 }
032d197e
LC
1589 }
1590}
1591
4de9d0b5
LN
1592static int common_nonsnoop(struct talitos_edesc *edesc,
1593 struct ablkcipher_request *areq,
4de9d0b5
LN
1594 void (*callback) (struct device *dev,
1595 struct talitos_desc *desc,
1596 void *context, int error))
1597{
1598 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1599 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1600 struct device *dev = ctx->dev;
1601 struct talitos_desc *desc = &edesc->desc;
1602 unsigned int cryptlen = areq->nbytes;
79fd31d3 1603 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1604 int sg_count, ret;
922f9dc8
LC
1605 struct talitos_private *priv = dev_get_drvdata(dev);
1606 bool is_sec1 = has_ftr_sec1(priv);
4de9d0b5
LN
1607
1608 /* first DWORD empty */
2529bc37 1609 desc->ptr[0] = zero_entry;
4de9d0b5
LN
1610
1611 /* cipher iv */
922f9dc8
LC
1612 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1613 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1614 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
4de9d0b5
LN
1615
1616 /* cipher key */
1617 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1618 (char *)&ctx->key, DMA_TO_DEVICE);
4de9d0b5
LN
1619
1620 /*
1621 * cipher in
1622 */
032d197e
LC
1623 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1624 (areq->src == areq->dst) ?
1625 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1626 &desc->ptr[3]);
4de9d0b5
LN
1627
1628 /* cipher out */
032d197e
LC
1629 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1630 (areq->src == areq->dst) ? DMA_NONE
1631 : DMA_FROM_DEVICE,
1632 &desc->ptr[4], sg_count);
4de9d0b5
LN
1633
1634 /* iv out */
a2b35aa8 1635 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
4de9d0b5
LN
1636 DMA_FROM_DEVICE);
1637
1638 /* last DWORD empty */
2529bc37 1639 desc->ptr[6] = zero_entry;
4de9d0b5 1640
5228f0f7 1641 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
4de9d0b5
LN
1642 if (ret != -EINPROGRESS) {
1643 common_nonsnoop_unmap(dev, edesc, areq);
1644 kfree(edesc);
1645 }
1646 return ret;
1647}
1648
e938e465 1649static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1650 areq, bool encrypt)
4de9d0b5
LN
1651{
1652 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1653 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1654 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1655
aeb4c132 1656 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1657 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1658 areq->base.flags, encrypt);
4de9d0b5
LN
1659}
1660
1661static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1662{
1663 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1664 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1665 struct talitos_edesc *edesc;
1666
1667 /* allocate extended descriptor */
62293a37 1668 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1669 if (IS_ERR(edesc))
1670 return PTR_ERR(edesc);
1671
1672 /* set encrypt */
1673 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1674
febec542 1675 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1676}
1677
1678static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1679{
1680 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1681 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1682 struct talitos_edesc *edesc;
1683
1684 /* allocate extended descriptor */
62293a37 1685 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1686 if (IS_ERR(edesc))
1687 return PTR_ERR(edesc);
1688
1689 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1690
febec542 1691 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1692}
1693
497f2e6b
LN
1694static void common_nonsnoop_hash_unmap(struct device *dev,
1695 struct talitos_edesc *edesc,
1696 struct ahash_request *areq)
1697{
1698 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
922f9dc8
LC
1699 struct talitos_private *priv = dev_get_drvdata(dev);
1700 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1701
1702 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1703
032d197e
LC
1704 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1705
497f2e6b 1706 /* When using hashctx-in, must unmap it. */
922f9dc8 1707 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
497f2e6b
LN
1708 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1709 DMA_TO_DEVICE);
1710
922f9dc8 1711 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
497f2e6b
LN
1712 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1713 DMA_TO_DEVICE);
1714
497f2e6b
LN
1715 if (edesc->dma_len)
1716 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1717 DMA_BIDIRECTIONAL);
1718
1719}
1720
1721static void ahash_done(struct device *dev,
1722 struct talitos_desc *desc, void *context,
1723 int err)
1724{
1725 struct ahash_request *areq = context;
1726 struct talitos_edesc *edesc =
1727 container_of(desc, struct talitos_edesc, desc);
1728 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1729
1730 if (!req_ctx->last && req_ctx->to_hash_later) {
1731 /* Position any partial block for next update/final/finup */
1732 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1733 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1734 }
1735 common_nonsnoop_hash_unmap(dev, edesc, areq);
1736
1737 kfree(edesc);
1738
1739 areq->base.complete(&areq->base, err);
1740}
1741
2d02905e
LC
1742/*
1743 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1744 * ourself and submit a padded block
1745 */
1746void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1747 struct talitos_edesc *edesc,
1748 struct talitos_ptr *ptr)
1749{
1750 static u8 padded_hash[64] = {
1751 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1752 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1753 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1754 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1755 };
1756
1757 pr_err_once("Bug in SEC1, padding ourself\n");
1758 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1759 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1760 (char *)padded_hash, DMA_TO_DEVICE);
1761}
1762
497f2e6b
LN
1763static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1764 struct ahash_request *areq, unsigned int length,
1765 void (*callback) (struct device *dev,
1766 struct talitos_desc *desc,
1767 void *context, int error))
1768{
1769 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1770 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1771 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772 struct device *dev = ctx->dev;
1773 struct talitos_desc *desc = &edesc->desc;
032d197e 1774 int ret;
922f9dc8
LC
1775 struct talitos_private *priv = dev_get_drvdata(dev);
1776 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1777
1778 /* first DWORD empty */
1779 desc->ptr[0] = zero_entry;
1780
60f208d7
KP
1781 /* hash context in */
1782 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1783 map_single_talitos_ptr(dev, &desc->ptr[1],
1784 req_ctx->hw_context_size,
a2b35aa8 1785 (char *)req_ctx->hw_context,
497f2e6b 1786 DMA_TO_DEVICE);
60f208d7 1787 req_ctx->swinit = 0;
497f2e6b
LN
1788 } else {
1789 desc->ptr[1] = zero_entry;
1790 /* Indicate next op is not the first. */
1791 req_ctx->first = 0;
1792 }
1793
1794 /* HMAC key */
1795 if (ctx->keylen)
1796 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1797 (char *)&ctx->key, DMA_TO_DEVICE);
497f2e6b
LN
1798 else
1799 desc->ptr[2] = zero_entry;
1800
1801 /*
1802 * data in
1803 */
032d197e
LC
1804 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1805 DMA_TO_DEVICE, &desc->ptr[3]);
497f2e6b
LN
1806
1807 /* fifth DWORD empty */
1808 desc->ptr[4] = zero_entry;
1809
1810 /* hash/HMAC out -or- hash context out */
1811 if (req_ctx->last)
1812 map_single_talitos_ptr(dev, &desc->ptr[5],
1813 crypto_ahash_digestsize(tfm),
a2b35aa8 1814 areq->result, DMA_FROM_DEVICE);
497f2e6b
LN
1815 else
1816 map_single_talitos_ptr(dev, &desc->ptr[5],
1817 req_ctx->hw_context_size,
a2b35aa8 1818 req_ctx->hw_context, DMA_FROM_DEVICE);
497f2e6b
LN
1819
1820 /* last DWORD empty */
1821 desc->ptr[6] = zero_entry;
1822
2d02905e
LC
1823 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1824 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1825
5228f0f7 1826 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1827 if (ret != -EINPROGRESS) {
1828 common_nonsnoop_hash_unmap(dev, edesc, areq);
1829 kfree(edesc);
1830 }
1831 return ret;
1832}
1833
1834static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1835 unsigned int nbytes)
1836{
1837 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1838 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1839 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1840
aeb4c132 1841 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1842 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1843}
1844
1845static int ahash_init(struct ahash_request *areq)
1846{
1847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1848 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1849
1850 /* Initialize the context */
5e833bc4 1851 req_ctx->nbuf = 0;
60f208d7
KP
1852 req_ctx->first = 1; /* first indicates h/w must init its context */
1853 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1854 req_ctx->hw_context_size =
1855 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1856 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1857 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1858
1859 return 0;
1860}
1861
60f208d7
KP
1862/*
1863 * on h/w without explicit sha224 support, we initialize h/w context
1864 * manually with sha224 constants, and tell it to run sha256.
1865 */
1866static int ahash_init_sha224_swinit(struct ahash_request *areq)
1867{
1868 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1869
1870 ahash_init(areq);
1871 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1872
a752447a
KP
1873 req_ctx->hw_context[0] = SHA224_H0;
1874 req_ctx->hw_context[1] = SHA224_H1;
1875 req_ctx->hw_context[2] = SHA224_H2;
1876 req_ctx->hw_context[3] = SHA224_H3;
1877 req_ctx->hw_context[4] = SHA224_H4;
1878 req_ctx->hw_context[5] = SHA224_H5;
1879 req_ctx->hw_context[6] = SHA224_H6;
1880 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1881
1882 /* init 64-bit count */
1883 req_ctx->hw_context[8] = 0;
1884 req_ctx->hw_context[9] = 0;
1885
1886 return 0;
1887}
1888
497f2e6b
LN
/*
 * Core hashing path shared by update/final/finup/digest.
 *
 * Buffers sub-block input in the request context, hashes only whole
 * blocks (keeping one block buffered on non-final calls so the hardware
 * never pads prematurely), and builds/submits an extended descriptor
 * for whatever can be hashed now.
 *
 * Returns 0 when the data was merely buffered, -EINPROGRESS when a
 * descriptor was submitted, or a negative errno.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	/* not final and still no more than one block total: just buffer */
	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	/* NOTE(review): assumes blocksize is a power of two — true for all
	 * MD5/SHA block sizes registered by this driver */
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;	/* final: hash everything, h/w pads */
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* stash the tail that will not be hashed this round; it becomes
	 * req_ctx->buf when the completion callback runs */
	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1972
1973static int ahash_update(struct ahash_request *areq)
1974{
1975 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1976
1977 req_ctx->last = 0;
1978
1979 return ahash_process_req(areq, areq->nbytes);
1980}
1981
1982static int ahash_final(struct ahash_request *areq)
1983{
1984 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1985
1986 req_ctx->last = 1;
1987
1988 return ahash_process_req(areq, 0);
1989}
1990
1991static int ahash_finup(struct ahash_request *areq)
1992{
1993 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1994
1995 req_ctx->last = 1;
1996
1997 return ahash_process_req(areq, areq->nbytes);
1998}
1999
2000static int ahash_digest(struct ahash_request *areq)
2001{
2002 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 2003 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 2004
60f208d7 2005 ahash->init(areq);
497f2e6b
LN
2006 req_ctx->last = 1;
2007
2008 return ahash_process_req(areq, areq->nbytes);
2009}
2010
79b3a418
LN
/* Completion bookkeeping for the synchronous hash of an over-long HMAC key. */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the async request */
};
2015
2016static void keyhash_complete(struct crypto_async_request *req, int err)
2017{
2018 struct keyhash_result *res = req->data;
2019
2020 if (err == -EINPROGRESS)
2021 return;
2022
2023 res->err = err;
2024 complete(&res->completion);
2025}
2026
2027static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2028 u8 *hash)
2029{
2030 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2031
2032 struct scatterlist sg[1];
2033 struct ahash_request *req;
2034 struct keyhash_result hresult;
2035 int ret;
2036
2037 init_completion(&hresult.completion);
2038
2039 req = ahash_request_alloc(tfm, GFP_KERNEL);
2040 if (!req)
2041 return -ENOMEM;
2042
2043 /* Keep tfm keylen == 0 during hash of the long key */
2044 ctx->keylen = 0;
2045 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2046 keyhash_complete, &hresult);
2047
2048 sg_init_one(&sg[0], key, keylen);
2049
2050 ahash_request_set_crypt(req, sg, hash, keylen);
2051 ret = crypto_ahash_digest(req);
2052 switch (ret) {
2053 case 0:
2054 break;
2055 case -EINPROGRESS:
2056 case -EBUSY:
2057 ret = wait_for_completion_interruptible(
2058 &hresult.completion);
2059 if (!ret)
2060 ret = hresult.err;
2061 break;
2062 default:
2063 break;
2064 }
2065 ahash_request_free(req);
2066
2067 return ret;
2068}
2069
2070static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2071 unsigned int keylen)
2072{
2073 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2074 unsigned int blocksize =
2075 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2076 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2077 unsigned int keysize = keylen;
2078 u8 hash[SHA512_DIGEST_SIZE];
2079 int ret;
2080
2081 if (keylen <= blocksize)
2082 memcpy(ctx->key, key, keysize);
2083 else {
2084 /* Must get the hash of the long key */
2085 ret = keyhash(tfm, key, keylen, hash);
2086
2087 if (ret) {
2088 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2089 return -EINVAL;
2090 }
2091
2092 keysize = digestsize;
2093 memcpy(ctx->key, hash, digestsize);
2094 }
2095
2096 ctx->keylen = keysize;
2097
2098 return 0;
2099}
2100
2101
9c4a7965 2102struct talitos_alg_template {
d5e4aaef
LN
2103 u32 type;
2104 union {
2105 struct crypto_alg crypto;
acbf7c62 2106 struct ahash_alg hash;
aeb4c132 2107 struct aead_alg aead;
d5e4aaef 2108 } alg;
9c4a7965
KP
2109 __be32 desc_hdr_template;
2110};
2111
/*
 * Table of every algorithm the driver may offer. Entries are filtered
 * at probe time by hw_supports() against the device-tree capability
 * masks, then instantiated by talitos_alg_alloc().
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		/* sha384/sha512 use execution unit B (MDEUB) */
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
2583
/* One registered algorithm instance, linked into talitos_private's list. */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in priv->alg_list */
	struct device *dev;		/* SEC device backing this alg */
	struct talitos_alg_template algt;	/* copy of the template */
};
2589
/*
 * Common tfm init: recover the owning talitos_crypto_alg from the
 * generic crypto_alg (ahash algs embed their crypto_alg differently,
 * hence the container_of distinction), record the device, pick a
 * channel and seed the per-tfm descriptor header template.
 */
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	/* num_channels is a power of two (validated at probe), so the
	 * mask implements modulo */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
2621
aeb4c132 2622static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2623{
aeb4c132 2624 talitos_cra_init(crypto_aead_tfm(tfm));
9c4a7965
KP
2625 return 0;
2626}
2627
497f2e6b
LN
2628static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2629{
2630 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2631
2632 talitos_cra_init(tfm);
2633
2634 ctx->keylen = 0;
2635 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2636 sizeof(struct talitos_ahash_req_ctx));
2637
2638 return 0;
2639}
2640
9c4a7965
KP
2641/*
2642 * given the alg's descriptor header template, determine whether descriptor
2643 * type and primary/secondary execution units required match the hw
2644 * capabilities description provided in the device tree node.
2645 */
2646static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2647{
2648 struct talitos_private *priv = dev_get_drvdata(dev);
2649 int ret;
2650
2651 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2652 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2653
2654 if (SECONDARY_EU(desc_hdr_template))
2655 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2656 & priv->exec_units);
2657
2658 return ret;
2659}
2660
2dc11581 2661static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2662{
2663 struct device *dev = &ofdev->dev;
2664 struct talitos_private *priv = dev_get_drvdata(dev);
2665 struct talitos_crypto_alg *t_alg, *n;
2666 int i;
2667
2668 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2669 switch (t_alg->algt.type) {
2670 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62 2671 break;
aeb4c132
HX
2672 case CRYPTO_ALG_TYPE_AEAD:
2673 crypto_unregister_aead(&t_alg->algt.alg.aead);
acbf7c62
LN
2674 case CRYPTO_ALG_TYPE_AHASH:
2675 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2676 break;
2677 }
9c4a7965
KP
2678 list_del(&t_alg->entry);
2679 kfree(t_alg);
2680 }
2681
2682 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2683 talitos_unregister_rng(dev);
2684
35a3bb3d 2685 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2686 kfree(priv->chan[i].fifo);
9c4a7965 2687
4b992628 2688 kfree(priv->chan);
9c4a7965 2689
c3e337f8 2690 for (i = 0; i < 2; i++)
2cdba3cf 2691 if (priv->irq[i]) {
c3e337f8
KP
2692 free_irq(priv->irq[i], dev);
2693 irq_dispose_mapping(priv->irq[i]);
2694 }
9c4a7965 2695
c3e337f8 2696 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2697 if (priv->irq[1])
c3e337f8 2698 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2699
2700 iounmap(priv->reg);
2701
9c4a7965
KP
2702 kfree(priv);
2703
2704 return 0;
2705}
2706
/*
 * Instantiate a talitos_crypto_alg from a driver_algs[] template:
 * copy the template, wire up the type-specific entry points, apply the
 * common crypto_alg fields, and handle feature-dependent fallbacks
 * (no-HMAC hardware, sha224 via software-initialized sha256).
 *
 * Returns the new object or an ERR_PTR; the caller owns the memory
 * (freed in talitos_remove()).
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		/* new aead interface: ops live on aead_alg, not crypto_alg */
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		/* h/w without HMAC support cannot offer hmac(...) algs */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/* no native sha224: init context in s/w, run as sha256 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	/* fields common to all algorithm types */
	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
2780
c3e337f8
KP
/*
 * Map and request the device's irq line(s).
 *
 * SEC1 hardware uses a single irq for all four channels. SEC2+ may
 * provide a second line, in which case channels 0/2 are served by
 * irq[0] and channels 1/3 by irq[1]; with only one line, a combined
 * 4-channel handler is used. Failure to obtain the secondary line is
 * non-fatal (irq[1] is simply cleared); failure on the primary line
 * is returned to the caller.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		/* SEC1: one irq covers all channels */
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
2834
1c48a5c9 2835static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2836{
2837 struct device *dev = &ofdev->dev;
61c7a080 2838 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2839 struct talitos_private *priv;
2840 const unsigned int *prop;
2841 int i, err;
5fa7fa14 2842 int stride;
9c4a7965
KP
2843
2844 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2845 if (!priv)
2846 return -ENOMEM;
2847
f3de9cb1
KH
2848 INIT_LIST_HEAD(&priv->alg_list);
2849
9c4a7965
KP
2850 dev_set_drvdata(dev, priv);
2851
2852 priv->ofdev = ofdev;
2853
511d63cb
HG
2854 spin_lock_init(&priv->reg_lock);
2855
9c4a7965
KP
2856 priv->reg = of_iomap(np, 0);
2857 if (!priv->reg) {
2858 dev_err(dev, "failed to of_iomap\n");
2859 err = -ENOMEM;
2860 goto err_out;
2861 }
2862
2863 /* get SEC version capabilities from device tree */
2864 prop = of_get_property(np, "fsl,num-channels", NULL);
2865 if (prop)
2866 priv->num_channels = *prop;
2867
2868 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2869 if (prop)
2870 priv->chfifo_len = *prop;
2871
2872 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2873 if (prop)
2874 priv->exec_units = *prop;
2875
2876 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2877 if (prop)
2878 priv->desc_types = *prop;
2879
2880 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2881 !priv->exec_units || !priv->desc_types) {
2882 dev_err(dev, "invalid property data in device tree node\n");
2883 err = -EINVAL;
2884 goto err_out;
2885 }
2886
f3c85bc1
LN
2887 if (of_device_is_compatible(np, "fsl,sec3.0"))
2888 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2889
fe5720e2 2890 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 2891 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
2892 TALITOS_FTR_SHA224_HWINIT |
2893 TALITOS_FTR_HMAC_OK;
fe5720e2 2894
21590888
LC
2895 if (of_device_is_compatible(np, "fsl,sec1.0"))
2896 priv->features |= TALITOS_FTR_SEC1;
2897
5fa7fa14
LC
2898 if (of_device_is_compatible(np, "fsl,sec1.2")) {
2899 priv->reg_deu = priv->reg + TALITOS12_DEU;
2900 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2901 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2902 stride = TALITOS1_CH_STRIDE;
2903 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2904 priv->reg_deu = priv->reg + TALITOS10_DEU;
2905 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2906 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2907 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2908 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2909 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2910 stride = TALITOS1_CH_STRIDE;
2911 } else {
2912 priv->reg_deu = priv->reg + TALITOS2_DEU;
2913 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2914 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2915 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2916 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2917 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2918 priv->reg_keu = priv->reg + TALITOS2_KEU;
2919 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2920 stride = TALITOS2_CH_STRIDE;
2921 }
2922
dd3c0987
LC
2923 err = talitos_probe_irq(ofdev);
2924 if (err)
2925 goto err_out;
2926
2927 if (of_device_is_compatible(np, "fsl,sec1.0")) {
2928 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2929 (unsigned long)dev);
2930 } else {
2931 if (!priv->irq[1]) {
2932 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2933 (unsigned long)dev);
2934 } else {
2935 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2936 (unsigned long)dev);
2937 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2938 (unsigned long)dev);
2939 }
2940 }
2941
4b992628
KP
2942 priv->chan = kzalloc(sizeof(struct talitos_channel) *
2943 priv->num_channels, GFP_KERNEL);
2944 if (!priv->chan) {
2945 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
2946 err = -ENOMEM;
2947 goto err_out;
2948 }
2949
f641dddd
MH
2950 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2951
c3e337f8 2952 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 2953 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 2954 if (!priv->irq[1] || !(i & 1))
c3e337f8 2955 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 2956
4b992628
KP
2957 spin_lock_init(&priv->chan[i].head_lock);
2958 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 2959
4b992628
KP
2960 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2961 priv->fifo_len, GFP_KERNEL);
2962 if (!priv->chan[i].fifo) {
9c4a7965
KP
2963 dev_err(dev, "failed to allocate request fifo %d\n", i);
2964 err = -ENOMEM;
2965 goto err_out;
2966 }
9c4a7965 2967
4b992628
KP
2968 atomic_set(&priv->chan[i].submit_count,
2969 -(priv->chfifo_len - 1));
f641dddd 2970 }
9c4a7965 2971
81eb024c
KP
2972 dma_set_mask(dev, DMA_BIT_MASK(36));
2973
9c4a7965
KP
2974 /* reset and initialize the h/w */
2975 err = init_device(dev);
2976 if (err) {
2977 dev_err(dev, "failed to initialize device\n");
2978 goto err_out;
2979 }
2980
2981 /* register the RNG, if available */
2982 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2983 err = talitos_register_rng(dev);
2984 if (err) {
2985 dev_err(dev, "failed to register hwrng: %d\n", err);
2986 goto err_out;
2987 } else
2988 dev_info(dev, "hwrng\n");
2989 }
2990
2991 /* register crypto algorithms the device supports */
9c4a7965
KP
2992 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2993 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2994 struct talitos_crypto_alg *t_alg;
aeb4c132 2995 struct crypto_alg *alg = NULL;
9c4a7965
KP
2996
2997 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2998 if (IS_ERR(t_alg)) {
2999 err = PTR_ERR(t_alg);
0b2730d8 3000 if (err == -ENOTSUPP)
79b3a418 3001 continue;
9c4a7965
KP
3002 goto err_out;
3003 }
3004
acbf7c62
LN
3005 switch (t_alg->algt.type) {
3006 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62
LN
3007 err = crypto_register_alg(
3008 &t_alg->algt.alg.crypto);
aeb4c132 3009 alg = &t_alg->algt.alg.crypto;
acbf7c62 3010 break;
aeb4c132
HX
3011
3012 case CRYPTO_ALG_TYPE_AEAD:
3013 err = crypto_register_aead(
3014 &t_alg->algt.alg.aead);
3015 alg = &t_alg->algt.alg.aead.base;
3016 break;
3017
acbf7c62
LN
3018 case CRYPTO_ALG_TYPE_AHASH:
3019 err = crypto_register_ahash(
3020 &t_alg->algt.alg.hash);
aeb4c132 3021 alg = &t_alg->algt.alg.hash.halg.base;
acbf7c62
LN
3022 break;
3023 }
9c4a7965
KP
3024 if (err) {
3025 dev_err(dev, "%s alg registration failed\n",
aeb4c132 3026 alg->cra_driver_name);
9c4a7965 3027 kfree(t_alg);
991155ba 3028 } else
9c4a7965 3029 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3030 }
3031 }
5b859b6e
KP
3032 if (!list_empty(&priv->alg_list))
3033 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3034 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3035
3036 return 0;
3037
3038err_out:
3039 talitos_remove(ofdev);
9c4a7965
KP
3040
3041 return err;
3042}
3043
6c3f975a 3044static const struct of_device_id talitos_match[] = {
0635b7db
LC
3045#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3046 {
3047 .compatible = "fsl,sec1.0",
3048 },
3049#endif
3050#ifdef CONFIG_CRYPTO_DEV_TALITOS2
9c4a7965
KP
3051 {
3052 .compatible = "fsl,sec2.0",
3053 },
0635b7db 3054#endif
9c4a7965
KP
3055 {},
3056};
3057MODULE_DEVICE_TABLE(of, talitos_match);
3058
1c48a5c9 3059static struct platform_driver talitos_driver = {
4018294b
GL
3060 .driver = {
3061 .name = "talitos",
4018294b
GL
3062 .of_match_table = talitos_match,
3063 },
9c4a7965 3064 .probe = talitos_probe,
596f1034 3065 .remove = talitos_remove,
9c4a7965
KP
3066};
3067
741e8c2d 3068module_platform_driver(talitos_driver);
9c4a7965
KP
3069
3070MODULE_LICENSE("GPL");
3071MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3072MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
This page took 1.79756 seconds and 5 git commands to generate.