crypto: talitos - fix ahash algorithms registration
drivers/crypto/talitos.c
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
42#include <linux/slab.h>
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
46#include <crypto/des.h>
47#include <crypto/sha.h>
48#include <crypto/md5.h>
49#include <crypto/internal/aead.h>
50#include <crypto/authenc.h>
51#include <crypto/skcipher.h>
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
54#include <crypto/scatterwalk.h>
55
56#include "talitos.h"
57
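/*
 * Write a mapped (bus) address into a h/w descriptor pointer.  SEC1 has no
 * eptr field, so the upper address bits are only filled in on SEC2/3.
 */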
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
60{
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
64}
65
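/*
 * Pointer length accessors: SEC1 keeps the length in len1 (with the reserved
 * byte cleared), while SEC2/3 use the plain len field.
 */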
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
67 bool is_sec1)
68{
69 if (is_sec1) {
70 ptr->res = 0;
71 ptr->len1 = cpu_to_be16(len);
72 } else {
73 ptr->len = cpu_to_be16(len);
74 }
75}
76
77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
78 bool is_sec1)
79{
80 if (is_sec1)
81 return be16_to_cpu(ptr->len1);
82 else
83 return be16_to_cpu(ptr->len);
84}
85
86static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
87{
88 if (!is_sec1)
89 ptr->j_extent = 0;
90}
91
92/*
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 */
95static void map_single_talitos_ptr(struct device *dev,
96 struct talitos_ptr *ptr,
97 unsigned int len, void *data,
98 enum dma_data_direction dir)
99{
100 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
103
104 to_talitos_ptr_len(ptr, len, is_sec1);
105 to_talitos_ptr(ptr, dma_addr, is_sec1);
106 to_talitos_ptr_extent_clear(ptr, is_sec1);
107}
108
109/*
110 * unmap bus single (contiguous) h/w descriptor pointer
111 */
112static void unmap_single_talitos_ptr(struct device *dev,
113 struct talitos_ptr *ptr,
114 enum dma_data_direction dir)
115{
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
118
119 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
120 from_talitos_ptr_len(ptr, is_sec1), dir);
121}
122
123static int reset_channel(struct device *dev, int ch)
124{
125 struct talitos_private *priv = dev_get_drvdata(dev);
126 unsigned int timeout = TALITOS_TIMEOUT;
127 bool is_sec1 = has_ftr_sec1(priv);
128
129 if (is_sec1) {
130 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
131 TALITOS1_CCCR_LO_RESET);
132
133 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
134 TALITOS1_CCCR_LO_RESET) && --timeout)
135 cpu_relax();
136 } else {
137 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
138 TALITOS2_CCCR_RESET);
139
140 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
141 TALITOS2_CCCR_RESET) && --timeout)
142 cpu_relax();
143 }
144
145 if (timeout == 0) {
146 dev_err(dev, "failed to reset channel %d\n", ch);
147 return -EIO;
148 }
149
150 /* set 36-bit addressing, done writeback enable and done IRQ enable */
151 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
152 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
153
154 /* and ICCR writeback, if available */
155 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
156 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
157 TALITOS_CCCR_LO_IWSE);
158
159 return 0;
160}
161
162static int reset_device(struct device *dev)
163{
164 struct talitos_private *priv = dev_get_drvdata(dev);
165 unsigned int timeout = TALITOS_TIMEOUT;
166 bool is_sec1 = has_ftr_sec1(priv);
167 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
168
169 setbits32(priv->reg + TALITOS_MCR, mcr);
170
171 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
172 && --timeout)
173 cpu_relax();
174
175 if (priv->irq[1]) {
176 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
177 setbits32(priv->reg + TALITOS_MCR, mcr);
178 }
179
180 if (timeout == 0) {
181 dev_err(dev, "failed to reset device\n");
182 return -EIO;
183 }
184
185 return 0;
186}
187
188/*
189 * Reset and initialize the device
190 */
191static int init_device(struct device *dev)
192{
193 struct talitos_private *priv = dev_get_drvdata(dev);
194 int ch, err;
195 bool is_sec1 = has_ftr_sec1(priv);
196
197 /*
198 * Master reset
199 * errata documentation: warning: certain SEC interrupts
200 * are not fully cleared by writing the MCR:SWR bit,
201 * set bit twice to completely reset
202 */
203 err = reset_device(dev);
204 if (err)
205 return err;
206
207 err = reset_device(dev);
208 if (err)
209 return err;
210
211 /* reset channels */
212 for (ch = 0; ch < priv->num_channels; ch++) {
213 err = reset_channel(dev, ch);
214 if (err)
215 return err;
216 }
217
218 /* enable channel done and error interrupts */
219 if (is_sec1) {
220 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
221 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
222 /* disable parity error check in DEU (erroneous? test vect.) */
223 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
224 } else {
225 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
226 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
227 }
228
229 /* disable integrity check error interrupts (use writeback instead) */
230 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
231 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
232 TALITOS_MDEUICR_LO_ICE);
233
234 return 0;
235}
236
237/**
238 * talitos_submit - submits a descriptor to the device for processing
239 * @dev: the SEC device to be used
240 * @ch: the SEC device channel to be used
241 * @desc: the descriptor to be processed by the device
242 * @callback: whom to call when processing is complete
243 * @context: a handle for use by caller (optional)
244 *
245 * desc must contain valid dma-mapped (bus physical) address pointers.
246 * callback must check err and feedback in descriptor header
247 * for device processing status.
248 */
249int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
250 void (*callback)(struct device *dev,
251 struct talitos_desc *desc,
252 void *context, int error),
253 void *context)
254{
255 struct talitos_private *priv = dev_get_drvdata(dev);
256 struct talitos_request *request;
257 unsigned long flags;
258 int head;
259 bool is_sec1 = has_ftr_sec1(priv);
260
261 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
262
263 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
264 /* h/w fifo is full */
265 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
266 return -EAGAIN;
267 }
268
269 head = priv->chan[ch].head;
270 request = &priv->chan[ch].fifo[head];
271
272 /* map descriptor and save caller data */
273 if (is_sec1) {
274 desc->hdr1 = desc->hdr;
275 desc->next_desc = 0;
276 request->dma_desc = dma_map_single(dev, &desc->hdr1,
277 TALITOS_DESC_SIZE,
278 DMA_BIDIRECTIONAL);
279 } else {
280 request->dma_desc = dma_map_single(dev, desc,
281 TALITOS_DESC_SIZE,
282 DMA_BIDIRECTIONAL);
283 }
284 request->callback = callback;
285 request->context = context;
286
287 /* increment fifo head */
288 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
289
290 smp_wmb();
291 request->desc = desc;
292
293 /* GO! */
294 wmb();
295 out_be32(priv->chan[ch].reg + TALITOS_FF,
296 upper_32_bits(request->dma_desc));
297 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
298 lower_32_bits(request->dma_desc));
299
300 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
301
302 return -EINPROGRESS;
303}
304EXPORT_SYMBOL(talitos_submit);
305
306/*
307 * process what was done, notify callback of error if not
308 */
309static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
310{
311 struct talitos_private *priv = dev_get_drvdata(dev);
312 struct talitos_request *request, saved_req;
313 unsigned long flags;
314 int tail, status;
315 bool is_sec1 = has_ftr_sec1(priv);
316
317 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
318
319 tail = priv->chan[ch].tail;
320 while (priv->chan[ch].fifo[tail].desc) {
321 __be32 hdr;
322
323 request = &priv->chan[ch].fifo[tail];
324
325 /* descriptors with their done bits set don't get the error */
326 rmb();
327 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
328
329 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
330 status = 0;
331 else
332 if (!error)
333 break;
334 else
335 status = error;
336
337 dma_unmap_single(dev, request->dma_desc,
338 TALITOS_DESC_SIZE,
339 DMA_BIDIRECTIONAL);
340
341 /* copy entries so we can call callback outside lock */
342 saved_req.desc = request->desc;
343 saved_req.callback = request->callback;
344 saved_req.context = request->context;
345
346 /* release request entry in fifo */
347 smp_wmb();
348 request->desc = NULL;
349
350 /* increment fifo tail */
351 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
352
353 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
354
355 atomic_dec(&priv->chan[ch].submit_count);
356
357 saved_req.callback(dev, saved_req.desc, saved_req.context,
358 status);
359 /* channel may resume processing in single desc error case */
360 if (error && !reset_ch && status == error)
361 return;
362 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
363 tail = priv->chan[ch].tail;
364 }
365
366 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
367}
368
369/*
370 * process completed requests for channels that have done status
371 */
372#define DEF_TALITOS1_DONE(name, ch_done_mask) \
373static void talitos1_done_##name(unsigned long data) \
374{ \
375 struct device *dev = (struct device *)data; \
376 struct talitos_private *priv = dev_get_drvdata(dev); \
377 unsigned long flags; \
378 \
379 if (ch_done_mask & 0x10000000) \
380 flush_channel(dev, 0, 0, 0); \
381 if (priv->num_channels == 1) \
382 goto out; \
383 if (ch_done_mask & 0x40000000) \
384 flush_channel(dev, 1, 0, 0); \
385 if (ch_done_mask & 0x00010000) \
386 flush_channel(dev, 2, 0, 0); \
387 if (ch_done_mask & 0x00040000) \
388 flush_channel(dev, 3, 0, 0); \
389 \
390out: \
391 /* At this point, all completed channels have been processed */ \
392 /* Unmask done interrupts for channels completed later on. */ \
393 spin_lock_irqsave(&priv->reg_lock, flags); \
394 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
395 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
396 spin_unlock_irqrestore(&priv->reg_lock, flags); \
397}
398
399DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
400
401#define DEF_TALITOS2_DONE(name, ch_done_mask) \
402static void talitos2_done_##name(unsigned long data) \
403{ \
404 struct device *dev = (struct device *)data; \
405 struct talitos_private *priv = dev_get_drvdata(dev); \
406 unsigned long flags; \
407 \
408 if (ch_done_mask & 1) \
409 flush_channel(dev, 0, 0, 0); \
410 if (priv->num_channels == 1) \
411 goto out; \
412 if (ch_done_mask & (1 << 2)) \
413 flush_channel(dev, 1, 0, 0); \
414 if (ch_done_mask & (1 << 4)) \
415 flush_channel(dev, 2, 0, 0); \
416 if (ch_done_mask & (1 << 6)) \
417 flush_channel(dev, 3, 0, 0); \
418 \
419out: \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
422 spin_lock_irqsave(&priv->reg_lock, flags); \
423 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
424 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
426}
427
428DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
429DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
430DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
431
432/*
433 * locate current (offending) descriptor
434 */
435static u32 current_desc_hdr(struct device *dev, int ch)
436{
437 struct talitos_private *priv = dev_get_drvdata(dev);
438 int tail, iter;
439 dma_addr_t cur_desc;
440
441 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
442 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
443
444 if (!cur_desc) {
445 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
446 return 0;
447 }
448
449 tail = priv->chan[ch].tail;
450
451 iter = tail;
452 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
453 iter = (iter + 1) & (priv->fifo_len - 1);
454 if (iter == tail) {
455 dev_err(dev, "couldn't locate current descriptor\n");
456 return 0;
457 }
458 }
459
460 return priv->chan[ch].fifo[iter].desc->hdr;
461}
462
463/*
464 * user diagnostics; report root cause of error based on execution unit status
465 */
466static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
467{
468 struct talitos_private *priv = dev_get_drvdata(dev);
469 int i;
470
471 if (!desc_hdr)
472 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
473
474 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
475 case DESC_HDR_SEL0_AFEU:
476 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
477 in_be32(priv->reg_afeu + TALITOS_EUISR),
478 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
479 break;
480 case DESC_HDR_SEL0_DEU:
481 dev_err(dev, "DEUISR 0x%08x_%08x\n",
482 in_be32(priv->reg_deu + TALITOS_EUISR),
483 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
484 break;
485 case DESC_HDR_SEL0_MDEUA:
486 case DESC_HDR_SEL0_MDEUB:
487 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
488 in_be32(priv->reg_mdeu + TALITOS_EUISR),
489 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
490 break;
491 case DESC_HDR_SEL0_RNG:
492 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
493 in_be32(priv->reg_rngu + TALITOS_ISR),
494 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
495 break;
496 case DESC_HDR_SEL0_PKEU:
497 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
498 in_be32(priv->reg_pkeu + TALITOS_EUISR),
499 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
500 break;
501 case DESC_HDR_SEL0_AESU:
502 dev_err(dev, "AESUISR 0x%08x_%08x\n",
503 in_be32(priv->reg_aesu + TALITOS_EUISR),
504 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
505 break;
506 case DESC_HDR_SEL0_CRCU:
507 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
508 in_be32(priv->reg_crcu + TALITOS_EUISR),
509 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
510 break;
511 case DESC_HDR_SEL0_KEU:
512 dev_err(dev, "KEUISR 0x%08x_%08x\n",
513 in_be32(priv->reg_pkeu + TALITOS_EUISR),
514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
515 break;
516 }
517
518 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
519 case DESC_HDR_SEL1_MDEUA:
520 case DESC_HDR_SEL1_MDEUB:
521 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
522 in_be32(priv->reg_mdeu + TALITOS_EUISR),
523 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
524 break;
525 case DESC_HDR_SEL1_CRCU:
526 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
527 in_be32(priv->reg_crcu + TALITOS_EUISR),
528 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
529 break;
530 }
531
532 for (i = 0; i < 8; i++)
533 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
534 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
535 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
536}
537
538/*
539 * recover from error interrupts
540 */
541static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
542{
543 struct talitos_private *priv = dev_get_drvdata(dev);
544 unsigned int timeout = TALITOS_TIMEOUT;
545 int ch, error, reset_dev = 0;
546 u32 v_lo;
547 bool is_sec1 = has_ftr_sec1(priv);
548 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
549
550 for (ch = 0; ch < priv->num_channels; ch++) {
551 /* skip channels without errors */
552 if (is_sec1) {
553 /* bits 29, 31, 17, 19 */
554 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
555 continue;
556 } else {
557 if (!(isr & (1 << (ch * 2 + 1))))
558 continue;
559 }
560
561 error = -EINVAL;
562
563 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
564
565 if (v_lo & TALITOS_CCPSR_LO_DOF) {
566 dev_err(dev, "double fetch fifo overflow error\n");
567 error = -EAGAIN;
568 reset_ch = 1;
569 }
570 if (v_lo & TALITOS_CCPSR_LO_SOF) {
571 /* h/w dropped descriptor */
572 dev_err(dev, "single fetch fifo overflow error\n");
573 error = -EAGAIN;
574 }
575 if (v_lo & TALITOS_CCPSR_LO_MDTE)
576 dev_err(dev, "master data transfer error\n");
577 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
578 dev_err(dev, is_sec1 ? "pointer not complete error\n"
579 : "s/g data length zero error\n");
580 if (v_lo & TALITOS_CCPSR_LO_FPZ)
581 dev_err(dev, is_sec1 ? "parity error\n"
582 : "fetch pointer zero error\n");
583 if (v_lo & TALITOS_CCPSR_LO_IDH)
584 dev_err(dev, "illegal descriptor header error\n");
585 if (v_lo & TALITOS_CCPSR_LO_IEU)
586 dev_err(dev, is_sec1 ? "static assignment error\n"
587 : "invalid exec unit error\n");
588 if (v_lo & TALITOS_CCPSR_LO_EU)
589 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
590 if (!is_sec1) {
591 if (v_lo & TALITOS_CCPSR_LO_GB)
592 dev_err(dev, "gather boundary error\n");
593 if (v_lo & TALITOS_CCPSR_LO_GRL)
594 dev_err(dev, "gather return/length error\n");
595 if (v_lo & TALITOS_CCPSR_LO_SB)
596 dev_err(dev, "scatter boundary error\n");
597 if (v_lo & TALITOS_CCPSR_LO_SRL)
598 dev_err(dev, "scatter return/length error\n");
599 }
600
601 flush_channel(dev, ch, error, reset_ch);
602
603 if (reset_ch) {
604 reset_channel(dev, ch);
605 } else {
606 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
607 TALITOS2_CCCR_CONT);
608 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
609 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
610 TALITOS2_CCCR_CONT) && --timeout)
611 cpu_relax();
612 if (timeout == 0) {
613 dev_err(dev, "failed to restart channel %d\n",
614 ch);
615 reset_dev = 1;
616 }
617 }
618 }
619 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
620 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
621 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
622 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
623 isr, isr_lo);
624 else
625 dev_err(dev, "done overflow, internal time out, or "
626 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
627
628 /* purge request queues */
629 for (ch = 0; ch < priv->num_channels; ch++)
630 flush_channel(dev, ch, -EIO, 1);
631
632 /* reset and reinitialize the device */
633 init_device(dev);
634 }
635}
636
637#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
638static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
639{ \
640 struct device *dev = data; \
641 struct talitos_private *priv = dev_get_drvdata(dev); \
642 u32 isr, isr_lo; \
643 unsigned long flags; \
644 \
645 spin_lock_irqsave(&priv->reg_lock, flags); \
646 isr = in_be32(priv->reg + TALITOS_ISR); \
647 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
648 /* Acknowledge interrupt */ \
649 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
650 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
651 \
652 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
653 spin_unlock_irqrestore(&priv->reg_lock, flags); \
654 talitos_error(dev, isr & ch_err_mask, isr_lo); \
655 } \
656 else { \
657 if (likely(isr & ch_done_mask)) { \
658 /* mask further done interrupts. */ \
659 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
660 /* done_task will unmask done interrupts at exit */ \
661 tasklet_schedule(&priv->done_task[tlet]); \
662 } \
663 spin_unlock_irqrestore(&priv->reg_lock, flags); \
664 } \
665 \
666 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
667 IRQ_NONE; \
668}
669
670DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
671
672#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
673static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
674{ \
675 struct device *dev = data; \
676 struct talitos_private *priv = dev_get_drvdata(dev); \
677 u32 isr, isr_lo; \
678 unsigned long flags; \
679 \
680 spin_lock_irqsave(&priv->reg_lock, flags); \
681 isr = in_be32(priv->reg + TALITOS_ISR); \
682 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
683 /* Acknowledge interrupt */ \
684 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
685 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
686 \
687 if (unlikely(isr & ch_err_mask || isr_lo)) { \
688 spin_unlock_irqrestore(&priv->reg_lock, flags); \
689 talitos_error(dev, isr & ch_err_mask, isr_lo); \
690 } \
691 else { \
692 if (likely(isr & ch_done_mask)) { \
693 /* mask further done interrupts. */ \
694 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
695 /* done_task will unmask done interrupts at exit */ \
696 tasklet_schedule(&priv->done_task[tlet]); \
697 } \
698 spin_unlock_irqrestore(&priv->reg_lock, flags); \
699 } \
700 \
701 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
702 IRQ_NONE; \
703}
704
705DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
706DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
707 0)
708DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
709 1)
710
711/*
712 * hwrng
713 */
714static int talitos_rng_data_present(struct hwrng *rng, int wait)
715{
716 struct device *dev = (struct device *)rng->priv;
717 struct talitos_private *priv = dev_get_drvdata(dev);
718 u32 ofl;
719 int i;
720
721 for (i = 0; i < 20; i++) {
722 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
723 TALITOS_RNGUSR_LO_OFL;
724 if (ofl || !wait)
725 break;
726 udelay(10);
727 }
728
729 return !!ofl;
730}
731
732static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
733{
734 struct device *dev = (struct device *)rng->priv;
735 struct talitos_private *priv = dev_get_drvdata(dev);
736
737 /* rng fifo requires 64-bit accesses */
738 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
739 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
740
741 return sizeof(u32);
742}
743
744static int talitos_rng_init(struct hwrng *rng)
745{
746 struct device *dev = (struct device *)rng->priv;
747 struct talitos_private *priv = dev_get_drvdata(dev);
748 unsigned int timeout = TALITOS_TIMEOUT;
749
750 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
751 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
752 & TALITOS_RNGUSR_LO_RD)
753 && --timeout)
754 cpu_relax();
755 if (timeout == 0) {
756 dev_err(dev, "failed to reset rng hw\n");
757 return -ENODEV;
758 }
759
760 /* start generating */
761 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
762
763 return 0;
764}
765
766static int talitos_register_rng(struct device *dev)
767{
768 struct talitos_private *priv = dev_get_drvdata(dev);
769 int err;
770
771 priv->rng.name = dev_driver_string(dev),
772 priv->rng.init = talitos_rng_init,
773 priv->rng.data_present = talitos_rng_data_present,
774 priv->rng.data_read = talitos_rng_data_read,
775 priv->rng.priv = (unsigned long)dev;
776
777 err = hwrng_register(&priv->rng);
778 if (!err)
779 priv->rng_registered = true;
780
781 return err;
782}
783
784static void talitos_unregister_rng(struct device *dev)
785{
786 struct talitos_private *priv = dev_get_drvdata(dev);
787
788 if (!priv->rng_registered)
789 return;
790
791 hwrng_unregister(&priv->rng);
792 priv->rng_registered = false;
793}
794
795/*
796 * crypto alg
797 */
798#define TALITOS_CRA_PRIORITY 3000
799#define TALITOS_MAX_KEY_SIZE 96
800#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
801
802struct talitos_ctx {
803 struct device *dev;
804 int ch;
805 __be32 desc_hdr_template;
806 u8 key[TALITOS_MAX_KEY_SIZE];
807 u8 iv[TALITOS_MAX_IV_LENGTH];
808 unsigned int keylen;
809 unsigned int enckeylen;
810 unsigned int authkeylen;
811};
812
813#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
814#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
815
816struct talitos_ahash_req_ctx {
817 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
818 unsigned int hw_context_size;
819 u8 buf[HASH_MAX_BLOCK_SIZE];
820 u8 bufnext[HASH_MAX_BLOCK_SIZE];
821 unsigned int swinit;
822 unsigned int first;
823 unsigned int last;
824 unsigned int to_hash_later;
825 unsigned int nbuf;
826 struct scatterlist bufsl[2];
827 struct scatterlist *psrc;
828};
829
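/*
 * Hash state saved and restored across requests (for the ahash
 * export/import operations); it mirrors the persistent fields of
 * talitos_ahash_req_ctx.
 */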
830struct talitos_export_state {
831 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
832 u8 buf[HASH_MAX_BLOCK_SIZE];
833 unsigned int swinit;
834 unsigned int first;
835 unsigned int last;
836 unsigned int to_hash_later;
837 unsigned int nbuf;
838};
839
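/*
 * Split an authenc() key blob into the authentication key and the cipher
 * key, stored back to back in ctx->key; the combined length must not exceed
 * TALITOS_MAX_KEY_SIZE.
 */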
840static int aead_setkey(struct crypto_aead *authenc,
841 const u8 *key, unsigned int keylen)
842{
843 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
844 struct crypto_authenc_keys keys;
845
846 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
847 goto badkey;
848
849 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
850 goto badkey;
851
852 memcpy(ctx->key, keys.authkey, keys.authkeylen);
853 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
854
855 ctx->keylen = keys.authkeylen + keys.enckeylen;
856 ctx->enckeylen = keys.enckeylen;
857 ctx->authkeylen = keys.authkeylen;
858
859 return 0;
860
861badkey:
862 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
863 return -EINVAL;
864}
865
866/*
867 * talitos_edesc - s/w-extended descriptor
868 * @src_nents: number of segments in input scatterlist
869 * @dst_nents: number of segments in output scatterlist
870 * @icv_ool: whether ICV is out-of-line
871 * @iv_dma: dma address of iv for checking continuity and link table
872 * @dma_len: length of dma mapped link_tbl space
873 * @dma_link_tbl: bus physical address of link_tbl/buf
874 * @desc: h/w descriptor
875 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
876 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
877 *
878 * if decrypting (with authcheck), or either one of src_nents or dst_nents
879 * is greater than 1, an integrity check value is concatenated to the end
880 * of link_tbl data
881 */
882struct talitos_edesc {
883 int src_nents;
884 int dst_nents;
885 bool icv_ool;
886 dma_addr_t iv_dma;
887 int dma_len;
888 dma_addr_t dma_link_tbl;
889 struct talitos_desc desc;
890 union {
891 struct talitos_ptr link_tbl[0];
892 u8 buf[0];
893 };
894};
895
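/*
 * Unmap the source and destination scatterlists of a request.  When src and
 * dst are the same list it was mapped bidirectionally and is unmapped once.
 */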
896static void talitos_sg_unmap(struct device *dev,
897 struct talitos_edesc *edesc,
898 struct scatterlist *src,
899 struct scatterlist *dst)
900{
901 unsigned int src_nents = edesc->src_nents ? : 1;
902 unsigned int dst_nents = edesc->dst_nents ? : 1;
903
904 if (src != dst) {
905 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
906
907 if (dst) {
908 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
909 }
910 } else
911 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
912}
913
914static void ipsec_esp_unmap(struct device *dev,
915 struct talitos_edesc *edesc,
916 struct aead_request *areq)
917{
918 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
919 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
920 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
921 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
922
923 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
924
925 if (edesc->dma_len)
926 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
927 DMA_BIDIRECTIONAL);
928}
929
930/*
931 * ipsec_esp descriptor callbacks
932 */
933static void ipsec_esp_encrypt_done(struct device *dev,
934 struct talitos_desc *desc, void *context,
935 int err)
936{
937 struct aead_request *areq = context;
938 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
939 unsigned int authsize = crypto_aead_authsize(authenc);
940 struct talitos_edesc *edesc;
941 struct scatterlist *sg;
942 void *icvdata;
943
944 edesc = container_of(desc, struct talitos_edesc, desc);
945
946 ipsec_esp_unmap(dev, edesc, areq);
947
948 /* copy the generated ICV to dst */
949 if (edesc->icv_ool) {
950 icvdata = &edesc->link_tbl[edesc->src_nents +
951 edesc->dst_nents + 2];
952 sg = sg_last(areq->dst, edesc->dst_nents);
953 memcpy((char *)sg_virt(sg) + sg->length - authsize,
954 icvdata, authsize);
955 }
956
957 kfree(edesc);
958
959 aead_request_complete(areq, err);
960}
961
962static void ipsec_esp_decrypt_swauth_done(struct device *dev,
963 struct talitos_desc *desc,
964 void *context, int err)
965{
966 struct aead_request *req = context;
967 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
968 unsigned int authsize = crypto_aead_authsize(authenc);
969 struct talitos_edesc *edesc;
970 struct scatterlist *sg;
971 char *oicv, *icv;
972
973 edesc = container_of(desc, struct talitos_edesc, desc);
974
975 ipsec_esp_unmap(dev, edesc, req);
976
977 if (!err) {
978 /* auth check */
979 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
980 icv = (char *)sg_virt(sg) + sg->length - authsize;
981
982 if (edesc->dma_len) {
983 oicv = (char *)&edesc->link_tbl[edesc->src_nents +
984 edesc->dst_nents + 2];
985 if (edesc->icv_ool)
986 icv = oicv + authsize;
987 } else
988 oicv = (char *)&edesc->link_tbl[0];
989
990 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
991 }
992
993 kfree(edesc);
994
995 aead_request_complete(req, err);
996}
997
998static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
999 struct talitos_desc *desc,
1000 void *context, int err)
1001{
1002 struct aead_request *req = context;
1003 struct talitos_edesc *edesc;
1004
1005 edesc = container_of(desc, struct talitos_edesc, desc);
1006
1007 ipsec_esp_unmap(dev, edesc, req);
1008
1009 /* check ICV auth status */
1010 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1011 DESC_HDR_LO_ICCR1_PASS))
1012 err = -EBADMSG;
1013
1014 kfree(edesc);
1015
1016 aead_request_complete(req, err);
1017}
1018
1019/*
1020 * convert scatterlist to SEC h/w link table format
1021 * stop at cryptlen bytes
1022 */
1023static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1024 unsigned int offset, int cryptlen,
1025 struct talitos_ptr *link_tbl_ptr)
1026{
1027 int n_sg = sg_count;
1028 int count = 0;
1029
1030 while (cryptlen && sg && n_sg--) {
1031 unsigned int len = sg_dma_len(sg);
1032
1033 if (offset >= len) {
1034 offset -= len;
1035 goto next;
1036 }
1037
1038 len -= offset;
1039
1040 if (len > cryptlen)
1041 len = cryptlen;
1042
1043 to_talitos_ptr(link_tbl_ptr + count,
1044 sg_dma_address(sg) + offset, 0);
1045 link_tbl_ptr[count].len = cpu_to_be16(len);
1046 link_tbl_ptr[count].j_extent = 0;
1047 count++;
1048 cryptlen -= len;
1049 offset = 0;
1050
1051next:
1052 sg = sg_next(sg);
1053 }
1054
1055 /* tag end of link table */
1056 if (count > 0)
1057 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
1058
1059 return count;
1060}
1061
1062static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1063 int cryptlen,
1064 struct talitos_ptr *link_tbl_ptr)
1065{
1066 return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
1067 link_tbl_ptr);
1068}
1069
1070/*
1071 * fill in and submit ipsec_esp descriptor
1072 */
1073static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1074 void (*callback)(struct device *dev,
1075 struct talitos_desc *desc,
1076 void *context, int error))
1077{
1078 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1079 unsigned int authsize = crypto_aead_authsize(aead);
1080 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1081 struct device *dev = ctx->dev;
1082 struct talitos_desc *desc = &edesc->desc;
1083 unsigned int cryptlen = areq->cryptlen;
1084 unsigned int ivsize = crypto_aead_ivsize(aead);
1085 int tbl_off = 0;
1086 int sg_count, ret;
1087 int sg_link_tbl_len;
1088
1089 /* hmac key */
1090 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1091 DMA_TO_DEVICE);
1092
1093 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1094 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1095 : DMA_TO_DEVICE);
1096
1097 /* hmac data */
1098 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1099 if (sg_count > 1 &&
1100 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1101 areq->assoclen,
1102 &edesc->link_tbl[tbl_off])) > 1) {
1103 tbl_off += ret;
1104
1105 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1106 sizeof(struct talitos_ptr), 0);
1107 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1108
1109 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1110 edesc->dma_len, DMA_BIDIRECTIONAL);
1111 } else {
1112 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1113 desc->ptr[1].j_extent = 0;
1114 }
1115
1116 /* cipher iv */
1117 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
1118 desc->ptr[2].len = cpu_to_be16(ivsize);
1119 desc->ptr[2].j_extent = 0;
1120
1121 /* cipher key */
1122 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1123 (char *)&ctx->key + ctx->authkeylen,
1124 DMA_TO_DEVICE);
1125
1126 /*
1127 * cipher in
1128 * map and adjust cipher len to aead request cryptlen.
1129 * extent is bytes of HMAC postpended to ciphertext,
1130 * typically 12 for ipsec
1131 */
1132 desc->ptr[4].len = cpu_to_be16(cryptlen);
1133 desc->ptr[4].j_extent = authsize;
1134
1135 sg_link_tbl_len = cryptlen;
1136 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1137 sg_link_tbl_len += authsize;
1138
1139 if (sg_count > 1 &&
1140 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
1141 sg_link_tbl_len,
1142 &edesc->link_tbl[tbl_off])) > 1) {
1143 tbl_off += ret;
1144 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1145 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1146 tbl_off *
1147 sizeof(struct talitos_ptr), 0);
1148 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1149 edesc->dma_len,
1150 DMA_BIDIRECTIONAL);
1151 } else
1152 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
1153
1154 /* cipher out */
1155 desc->ptr[5].len = cpu_to_be16(cryptlen);
1156 desc->ptr[5].j_extent = authsize;
1157
1158 if (areq->src != areq->dst)
1159 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1160 DMA_FROM_DEVICE);
1161
1162 edesc->icv_ool = false;
1163
1164 if (sg_count > 1 &&
1165 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
1166 areq->assoclen, cryptlen,
1167 &edesc->link_tbl[tbl_off])) >
1168 1) {
1169 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1170
1171 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1172 tbl_off * sizeof(struct talitos_ptr), 0);
1173
1174 /* Add an entry to the link table for ICV data */
1175 tbl_ptr += sg_count - 1;
1176 tbl_ptr->j_extent = 0;
1177 tbl_ptr++;
1178 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1179 tbl_ptr->len = cpu_to_be16(authsize);
1180
1181 /* icv data follows link tables */
1182 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1183 (edesc->src_nents + edesc->dst_nents +
1184 2) * sizeof(struct talitos_ptr) +
1185 authsize, 0);
1186 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1187 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1188 edesc->dma_len, DMA_BIDIRECTIONAL);
1189
1190 edesc->icv_ool = true;
1191 } else
1192 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
1193
1194 /* iv out */
1195 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1196 DMA_FROM_DEVICE);
1197
1198 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1199 if (ret != -EINPROGRESS) {
1200 ipsec_esp_unmap(dev, edesc, areq);
1201 kfree(edesc);
1202 }
1203 return ret;
1204}
1205
1206/*
1207 * allocate and map the extended descriptor
1208 */
1209static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1210 struct scatterlist *src,
1211 struct scatterlist *dst,
1212 u8 *iv,
1213 unsigned int assoclen,
1214 unsigned int cryptlen,
1215 unsigned int authsize,
1216 unsigned int ivsize,
1217 int icv_stashing,
1218 u32 cryptoflags,
1219 bool encrypt)
1220{
1221 struct talitos_edesc *edesc;
1222 int src_nents, dst_nents, alloc_len, dma_len;
1223 dma_addr_t iv_dma = 0;
1224 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1225 GFP_ATOMIC;
1226 struct talitos_private *priv = dev_get_drvdata(dev);
1227 bool is_sec1 = has_ftr_sec1(priv);
1228 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1229 void *err;
1230
1231 if (cryptlen + authsize > max_len) {
1232 dev_err(dev, "length exceeds h/w max limit\n");
1233 return ERR_PTR(-EINVAL);
1234 }
1235
1236 if (ivsize)
1237 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1238
1239 if (!dst || dst == src) {
1240 src_nents = sg_nents_for_len(src,
1241 assoclen + cryptlen + authsize);
1242 if (src_nents < 0) {
1243 dev_err(dev, "Invalid number of src SG.\n");
1244 err = ERR_PTR(-EINVAL);
1245 goto error_sg;
1246 }
1247 src_nents = (src_nents == 1) ? 0 : src_nents;
1248 dst_nents = dst ? src_nents : 0;
1249 } else { /* dst && dst != src*/
1250 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1251 (encrypt ? 0 : authsize));
1252 if (src_nents < 0) {
1253 dev_err(dev, "Invalid number of src SG.\n");
1254 err = ERR_PTR(-EINVAL);
1255 goto error_sg;
1256 }
1257 src_nents = (src_nents == 1) ? 0 : src_nents;
1258 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1259 (encrypt ? authsize : 0));
1260 if (dst_nents < 0) {
1261 dev_err(dev, "Invalid number of dst SG.\n");
1262 err = ERR_PTR(-EINVAL);
1263 goto error_sg;
1264 }
1265 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1266 }
1267
1268 /*
1269 * allocate space for base edesc plus the link tables,
1270 * allowing for two separate entries for AD and generated ICV (+ 2),
1271 * and space for two sets of ICVs (stashed and generated)
1272 */
1273 alloc_len = sizeof(struct talitos_edesc);
1274 if (src_nents || dst_nents) {
1275 if (is_sec1)
1276 dma_len = (src_nents ? cryptlen : 0) +
1277 (dst_nents ? cryptlen : 0);
1278 else
1279 dma_len = (src_nents + dst_nents + 2) *
1280 sizeof(struct talitos_ptr) + authsize * 2;
1281 alloc_len += dma_len;
1282 } else {
1283 dma_len = 0;
1284 alloc_len += icv_stashing ? authsize : 0;
1285 }
1286
1287 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1288 if (!edesc) {
1289 dev_err(dev, "could not allocate edescriptor\n");
1290 err = ERR_PTR(-ENOMEM);
1291 goto error_sg;
1292 }
1293
1294 edesc->src_nents = src_nents;
1295 edesc->dst_nents = dst_nents;
1296 edesc->iv_dma = iv_dma;
1297 edesc->dma_len = dma_len;
1298 if (dma_len)
1299 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1300 edesc->dma_len,
1301 DMA_BIDIRECTIONAL);
1302
1303 return edesc;
1304error_sg:
1305 if (iv_dma)
1306 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1307 return err;
1308}
1309
1310static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1311 int icv_stashing, bool encrypt)
1312{
1313 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1314 unsigned int authsize = crypto_aead_authsize(authenc);
1315 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1316 unsigned int ivsize = crypto_aead_ivsize(authenc);
1317
1318 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1319 iv, areq->assoclen, areq->cryptlen,
1320 authsize, ivsize, icv_stashing,
1321 areq->base.flags, encrypt);
1322}
1323
1324static int aead_encrypt(struct aead_request *req)
1325{
1326 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1327 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1328 struct talitos_edesc *edesc;
1329
1330 /* allocate extended descriptor */
1331 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1332 if (IS_ERR(edesc))
1333 return PTR_ERR(edesc);
1334
1335 /* set encrypt */
1336 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1337
1338 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1339}
1340
1341static int aead_decrypt(struct aead_request *req)
1342{
1343 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1344 unsigned int authsize = crypto_aead_authsize(authenc);
1345 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1346 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1347 struct talitos_edesc *edesc;
1348 struct scatterlist *sg;
1349 void *icvdata;
1350
1351 req->cryptlen -= authsize;
1352
1353 /* allocate extended descriptor */
1354 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1355 if (IS_ERR(edesc))
1356 return PTR_ERR(edesc);
1357
1358 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1359 ((!edesc->src_nents && !edesc->dst_nents) ||
1360 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1361
1362 /* decrypt and check the ICV */
1363 edesc->desc.hdr = ctx->desc_hdr_template |
1364 DESC_HDR_DIR_INBOUND |
1365 DESC_HDR_MODE1_MDEU_CICV;
1366
1367 /* reset integrity check result bits */
1368 edesc->desc.hdr_lo = 0;
1369
1370 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1371 }
1372
1373 /* Have to check the ICV with software */
1374 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1375
1376 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1377 if (edesc->dma_len)
1378 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1379 edesc->dst_nents + 2];
1380 else
1381 icvdata = &edesc->link_tbl[0];
1382
1383 sg = sg_last(req->src, edesc->src_nents ? : 1);
1384
1385 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1386
1387 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1388}
1389
1390static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1391 const u8 *key, unsigned int keylen)
1392{
1393 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1394
1395 memcpy(&ctx->key, key, keylen);
1396 ctx->keylen = keylen;
1397
1398 return 0;
1399}
1400
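/*
 * Undo map_sg_in/out_talitos_ptr(): on SEC1 scattered output data is copied
 * back from the bounce buffer (edesc->buf); on SEC2/3 the s/g lists are
 * simply unmapped.
 */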
1401static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1402 struct scatterlist *dst, unsigned int len,
1403 struct talitos_edesc *edesc)
1404{
1405 struct talitos_private *priv = dev_get_drvdata(dev);
1406 bool is_sec1 = has_ftr_sec1(priv);
1407
1408 if (is_sec1) {
1409 if (!edesc->src_nents) {
1410 dma_unmap_sg(dev, src, 1,
1411 dst != src ? DMA_TO_DEVICE
1412 : DMA_BIDIRECTIONAL);
1413 }
1414 if (dst && edesc->dst_nents) {
1415 dma_sync_single_for_device(dev,
1416 edesc->dma_link_tbl + len,
1417 len, DMA_FROM_DEVICE);
1418 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1419 edesc->buf + len, len);
1420 } else if (dst && dst != src) {
1421 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1422 }
1423 } else {
1424 talitos_sg_unmap(dev, edesc, src, dst);
1425 }
1426}
1427
1428static void common_nonsnoop_unmap(struct device *dev,
1429 struct talitos_edesc *edesc,
1430 struct ablkcipher_request *areq)
1431{
1432 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1433
1434 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1435 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1436 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1437
1438 if (edesc->dma_len)
1439 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1440 DMA_BIDIRECTIONAL);
1441}
1442
1443static void ablkcipher_done(struct device *dev,
1444 struct talitos_desc *desc, void *context,
1445 int err)
1446{
1447 struct ablkcipher_request *areq = context;
1448 struct talitos_edesc *edesc;
1449
1450 edesc = container_of(desc, struct talitos_edesc, desc);
1451
1452 common_nonsnoop_unmap(dev, edesc, areq);
1453
1454 kfree(edesc);
1455
1456 areq->base.complete(&areq->base, err);
1457}
1458
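/*
 * Map the input scatterlist into a h/w descriptor pointer.  SEC1 linearizes
 * multi-segment data into edesc->buf; SEC2/3 build a link table when more
 * than one segment remains after DMA mapping.
 */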
1459int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1460 unsigned int len, struct talitos_edesc *edesc,
1461 enum dma_data_direction dir, struct talitos_ptr *ptr)
1462{
1463 int sg_count;
1464 struct talitos_private *priv = dev_get_drvdata(dev);
1465 bool is_sec1 = has_ftr_sec1(priv);
1466
1467 to_talitos_ptr_len(ptr, len, is_sec1);
1468
1469 if (is_sec1) {
1470 sg_count = edesc->src_nents ? : 1;
1471
1472 if (sg_count == 1) {
1473 dma_map_sg(dev, src, 1, dir);
1474 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1475 } else {
1476 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1477 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1478 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1479 len, DMA_TO_DEVICE);
1480 }
1481 } else {
1482 to_talitos_ptr_extent_clear(ptr, is_sec1);
1483
1484 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1485
1486 if (sg_count == 1) {
1487 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1488 } else {
1489 sg_count = sg_to_link_tbl(src, sg_count, len,
1490 &edesc->link_tbl[0]);
1491 if (sg_count > 1) {
1492 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1493 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1494 dma_sync_single_for_device(dev,
1495 edesc->dma_link_tbl,
1496 edesc->dma_len,
1497 DMA_BIDIRECTIONAL);
1498 } else {
1499 /* Only one segment now, so no link tbl needed*/
1500 to_talitos_ptr(ptr, sg_dma_address(src),
1501 is_sec1);
1502 }
1503 }
1504 }
1505 return sg_count;
1506}
1507
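/*
 * Output-side counterpart of map_sg_in_talitos_ptr().  SEC1 points the
 * descriptor at the output half of the bounce buffer; SEC2/3 build a link
 * table at link_tbl[src_nents + 1] when the destination is scattered.
 */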
1508void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1509 unsigned int len, struct talitos_edesc *edesc,
1510 enum dma_data_direction dir,
1511 struct talitos_ptr *ptr, int sg_count)
1512{
1513 struct talitos_private *priv = dev_get_drvdata(dev);
1514 bool is_sec1 = has_ftr_sec1(priv);
1515
1516 if (dir != DMA_NONE)
1517 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1518
1519 to_talitos_ptr_len(ptr, len, is_sec1);
1520
1521 if (is_sec1) {
1522 if (sg_count == 1) {
1523 if (dir != DMA_NONE)
1524 dma_map_sg(dev, dst, 1, dir);
1525 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1526 } else {
1527 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1528 dma_sync_single_for_device(dev,
1529 edesc->dma_link_tbl + len,
1530 len, DMA_FROM_DEVICE);
1531 }
1532 } else {
1533 to_talitos_ptr_extent_clear(ptr, is_sec1);
1534
1535 if (sg_count == 1) {
1536 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1537 } else {
1538 struct talitos_ptr *link_tbl_ptr =
1539 &edesc->link_tbl[edesc->src_nents + 1];
1540
1541 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1542 (edesc->src_nents + 1) *
1543 sizeof(struct talitos_ptr), 0);
1544 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1545 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1546 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1547 edesc->dma_len,
1548 DMA_BIDIRECTIONAL);
1549 }
1550 }
1551}
1552
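/*
 * Fill in and submit the ablkcipher descriptor: iv in, key, cipher in,
 * cipher out, iv out, with the first and last descriptor words left empty.
 */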
1553static int common_nonsnoop(struct talitos_edesc *edesc,
1554 struct ablkcipher_request *areq,
1555 void (*callback) (struct device *dev,
1556 struct talitos_desc *desc,
1557 void *context, int error))
1558{
1559 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1560 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1561 struct device *dev = ctx->dev;
1562 struct talitos_desc *desc = &edesc->desc;
1563 unsigned int cryptlen = areq->nbytes;
1564 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1565 int sg_count, ret;
1566 struct talitos_private *priv = dev_get_drvdata(dev);
1567 bool is_sec1 = has_ftr_sec1(priv);
1568
1569 /* first DWORD empty */
1570 desc->ptr[0] = zero_entry;
1571
1572 /* cipher iv */
1573 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1574 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1575 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
1576
1577 /* cipher key */
1578 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1579 (char *)&ctx->key, DMA_TO_DEVICE);
1580
1581 /*
1582 * cipher in
1583 */
1584 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1585 (areq->src == areq->dst) ?
1586 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1587 &desc->ptr[3]);
1588
1589 /* cipher out */
1590 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1591 (areq->src == areq->dst) ? DMA_NONE
1592 : DMA_FROM_DEVICE,
1593 &desc->ptr[4], sg_count);
1594
1595 /* iv out */
1596 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1597 DMA_FROM_DEVICE);
1598
1599 /* last DWORD empty */
1600 desc->ptr[6] = zero_entry;
1601
1602 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1603 if (ret != -EINPROGRESS) {
1604 common_nonsnoop_unmap(dev, edesc, areq);
1605 kfree(edesc);
1606 }
1607 return ret;
1608}
1609
1610static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1611 areq, bool encrypt)
1612{
1613 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1614 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1615 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1616
1617 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1618 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1619 areq->base.flags, encrypt);
1620}
1621
1622static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1623{
1624 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1625 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1626 struct talitos_edesc *edesc;
1627
1628 /* allocate extended descriptor */
1629 edesc = ablkcipher_edesc_alloc(areq, true);
1630 if (IS_ERR(edesc))
1631 return PTR_ERR(edesc);
1632
1633 /* set encrypt */
1634 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1635
1636 return common_nonsnoop(edesc, areq, ablkcipher_done);
1637}
1638
1639static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1640{
1641 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1642 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1643 struct talitos_edesc *edesc;
1644
1645 /* allocate extended descriptor */
1646 edesc = ablkcipher_edesc_alloc(areq, false);
1647 if (IS_ERR(edesc))
1648 return PTR_ERR(edesc);
1649
1650 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1651
febec542 1652 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1653}
1654
497f2e6b
LN
1655static void common_nonsnoop_hash_unmap(struct device *dev,
1656 struct talitos_edesc *edesc,
1657 struct ahash_request *areq)
1658{
1659 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
922f9dc8
LC
1660 struct talitos_private *priv = dev_get_drvdata(dev);
1661 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1662
1663 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1664
032d197e
LC
1665 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1666
497f2e6b 1667 /* When using hashctx-in, must unmap it. */
922f9dc8 1668 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
497f2e6b
LN
1669 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1670 DMA_TO_DEVICE);
1671
922f9dc8 1672 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
497f2e6b
LN
1673 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1674 DMA_TO_DEVICE);
1675
497f2e6b
LN
1676 if (edesc->dma_len)
1677 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1678 DMA_BIDIRECTIONAL);
1679
1680}
1681
1682static void ahash_done(struct device *dev,
1683 struct talitos_desc *desc, void *context,
1684 int err)
1685{
1686 struct ahash_request *areq = context;
1687 struct talitos_edesc *edesc =
1688 container_of(desc, struct talitos_edesc, desc);
1689 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1690
1691 if (!req_ctx->last && req_ctx->to_hash_later) {
1692 /* Position any partial block for next update/final/finup */
1693 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1694 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1695 }
1696 common_nonsnoop_hash_unmap(dev, edesc, areq);
1697
1698 kfree(edesc);
1699
1700 areq->base.complete(&areq->base, err);
1701}
1702
2d02905e
LC
1703/*
1704 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1705 * ourselves and submit a padded block
1706 */
1707void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1708 struct talitos_edesc *edesc,
1709 struct talitos_ptr *ptr)
1710{
1711 static u8 padded_hash[64] = {
1712 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1713 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1714 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1715 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1716 };
1717
1718 pr_err_once("Bug in SEC1, padding ourselves\n");
1719 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1720 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1721 (char *)padded_hash, DMA_TO_DEVICE);
1722}
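
/*
 * Illustrative sketch, not part of the driver: padded_hash[] above is just
 * the standard 64-byte padding of an empty message for the 512-bit-block
 * digests the MDEU runs here (MD5, SHA-1, SHA-256): a 0x80 marker byte,
 * zero fill, and a zero length field (the length's endianness is moot
 * because the length is zero).  Built at run time it would be:
 */
static void __maybe_unused build_empty_msg_padding(u8 block[64])
{
	memset(block, 0, 64);	/* zero fill and zero 64-bit bit count */
	block[0] = 0x80;	/* the mandatory leading '1' padding bit */
}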
1723
497f2e6b
LN
1724static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1725 struct ahash_request *areq, unsigned int length,
1726 void (*callback) (struct device *dev,
1727 struct talitos_desc *desc,
1728 void *context, int error))
1729{
1730 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1731 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1732 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1733 struct device *dev = ctx->dev;
1734 struct talitos_desc *desc = &edesc->desc;
032d197e 1735 int ret;
922f9dc8
LC
1736 struct talitos_private *priv = dev_get_drvdata(dev);
1737 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1738
1739 /* first DWORD empty */
1740 desc->ptr[0] = zero_entry;
1741
60f208d7
KP
1742 /* hash context in */
1743 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1744 map_single_talitos_ptr(dev, &desc->ptr[1],
1745 req_ctx->hw_context_size,
a2b35aa8 1746 (char *)req_ctx->hw_context,
497f2e6b 1747 DMA_TO_DEVICE);
60f208d7 1748 req_ctx->swinit = 0;
497f2e6b
LN
1749 } else {
1750 desc->ptr[1] = zero_entry;
1751 /* Indicate next op is not the first. */
1752 req_ctx->first = 0;
1753 }
1754
1755 /* HMAC key */
1756 if (ctx->keylen)
1757 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1758 (char *)&ctx->key, DMA_TO_DEVICE);
497f2e6b
LN
1759 else
1760 desc->ptr[2] = zero_entry;
1761
1762 /*
1763 * data in
1764 */
032d197e
LC
1765 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1766 DMA_TO_DEVICE, &desc->ptr[3]);
497f2e6b
LN
1767
1768 /* fifth DWORD empty */
1769 desc->ptr[4] = zero_entry;
1770
1771 /* hash/HMAC out -or- hash context out */
1772 if (req_ctx->last)
1773 map_single_talitos_ptr(dev, &desc->ptr[5],
1774 crypto_ahash_digestsize(tfm),
a2b35aa8 1775 areq->result, DMA_FROM_DEVICE);
497f2e6b
LN
1776 else
1777 map_single_talitos_ptr(dev, &desc->ptr[5],
1778 req_ctx->hw_context_size,
a2b35aa8 1779 req_ctx->hw_context, DMA_FROM_DEVICE);
497f2e6b
LN
1780
1781 /* last DWORD empty */
1782 desc->ptr[6] = zero_entry;
1783
2d02905e
LC
1784 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1785 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1786
5228f0f7 1787 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1788 if (ret != -EINPROGRESS) {
1789 common_nonsnoop_hash_unmap(dev, edesc, areq);
1790 kfree(edesc);
1791 }
1792 return ret;
1793}
1794
1795static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1796 unsigned int nbytes)
1797{
1798 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1799 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1800 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1801
aeb4c132 1802 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1803 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1804}
1805
1806static int ahash_init(struct ahash_request *areq)
1807{
1808 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1809 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1810
1811 /* Initialize the context */
5e833bc4 1812 req_ctx->nbuf = 0;
60f208d7
KP
1813 req_ctx->first = 1; /* first indicates h/w must init its context */
1814 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1815 req_ctx->hw_context_size =
1816 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1817 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1818 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1819
1820 return 0;
1821}
1822
60f208d7
KP
1823/*
1824 * on h/w without explicit sha224 support, we initialize h/w context
1825 * manually with sha224 constants, and tell it to run sha256.
1826 */
1827static int ahash_init_sha224_swinit(struct ahash_request *areq)
1828{
1829 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1830
1831 ahash_init(areq);
1832 req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1833
a752447a
KP
1834 req_ctx->hw_context[0] = SHA224_H0;
1835 req_ctx->hw_context[1] = SHA224_H1;
1836 req_ctx->hw_context[2] = SHA224_H2;
1837 req_ctx->hw_context[3] = SHA224_H3;
1838 req_ctx->hw_context[4] = SHA224_H4;
1839 req_ctx->hw_context[5] = SHA224_H5;
1840 req_ctx->hw_context[6] = SHA224_H6;
1841 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1842
1843 /* init 64-bit count */
1844 req_ctx->hw_context[8] = 0;
1845 req_ctx->hw_context[9] = 0;
1846
1847 return 0;
1848}
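
/*
 * Illustrative sketch, not part of the driver: SHA-224 is SHA-256 with a
 * different initial hash value and a digest truncated to 224 bits, which
 * is why seeding the context above and running the MDEU in SHA-256 mode
 * is sufficient.  The SHA224_H0..H7 constants used above are the FIPS 180
 * initial values:
 */
static const u32 __maybe_unused sha224_init_state[8] = {
	0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
	0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
};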
1849
497f2e6b
LN
1850static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1851{
1852 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1853 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1854 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1855 struct talitos_edesc *edesc;
1856 unsigned int blocksize =
1857 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1858 unsigned int nbytes_to_hash;
1859 unsigned int to_hash_later;
5e833bc4 1860 unsigned int nsg;
8e409fe1 1861 int nents;
497f2e6b 1862
5e833bc4
LN
1863 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1864 /* Buffer up to one whole block */
8e409fe1
LC
1865 nents = sg_nents_for_len(areq->src, nbytes);
1866 if (nents < 0) {
1867 dev_err(ctx->dev, "Invalid number of src SG.\n");
1868 return nents;
1869 }
1870 sg_copy_to_buffer(areq->src, nents,
5e833bc4
LN
1871 req_ctx->buf + req_ctx->nbuf, nbytes);
1872 req_ctx->nbuf += nbytes;
497f2e6b
LN
1873 return 0;
1874 }
1875
5e833bc4
LN
1876 /* At least (blocksize + 1) bytes are available to hash */
1877 nbytes_to_hash = nbytes + req_ctx->nbuf;
1878 to_hash_later = nbytes_to_hash & (blocksize - 1);
1879
1880 if (req_ctx->last)
1881 to_hash_later = 0;
1882 else if (to_hash_later)
1883 /* There is a partial block. Hash the full block(s) now */
1884 nbytes_to_hash -= to_hash_later;
1885 else {
1886 /* Keep one block buffered */
1887 nbytes_to_hash -= blocksize;
1888 to_hash_later = blocksize;
1889 }
1890
1891 /* Chain in any previously buffered data */
1892 if (req_ctx->nbuf) {
1893 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1894 sg_init_table(req_ctx->bufsl, nsg);
1895 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1896 if (nsg > 1)
c56f6d12 1897 sg_chain(req_ctx->bufsl, 2, areq->src);
497f2e6b 1898 req_ctx->psrc = req_ctx->bufsl;
5e833bc4 1899 } else
497f2e6b 1900 req_ctx->psrc = areq->src;
5e833bc4
LN
1901
1902 if (to_hash_later) {
8e409fe1
LC
1903 nents = sg_nents_for_len(areq->src, nbytes);
1904 if (nents < 0) {
1905 dev_err(ctx->dev, "Invalid number of src SG.\n");
1906 return nents;
1907 }
d0525723 1908 sg_pcopy_to_buffer(areq->src, nents,
5e833bc4
LN
1909 req_ctx->bufnext,
1910 to_hash_later,
1911 nbytes - to_hash_later);
497f2e6b 1912 }
5e833bc4 1913 req_ctx->to_hash_later = to_hash_later;
497f2e6b 1914
5e833bc4 1915 /* Allocate extended descriptor */
497f2e6b
LN
1916 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1917 if (IS_ERR(edesc))
1918 return PTR_ERR(edesc);
1919
1920 edesc->desc.hdr = ctx->desc_hdr_template;
1921
1922 /* On last one, request SEC to pad; otherwise continue */
1923 if (req_ctx->last)
1924 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1925 else
1926 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1927
60f208d7
KP
1928 /* request SEC to INIT hash. */
1929 if (req_ctx->first && !req_ctx->swinit)
497f2e6b
LN
1930 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1931
1932 /* When the tfm context has a keylen, it's an HMAC.
1933 * A first or last (i.e. not middle) descriptor must request HMAC.
1934 */
1935 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1936 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1937
1938 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1939 ahash_done);
1940}
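
/*
 * Illustrative sketch, not part of the driver: the buffering rules
 * ahash_process_req() applies above, reduced to their arithmetic.  Given
 * the bytes already buffered (nbuf), the new request length (nbytes) and
 * the algorithm block size (a power of two here), it returns how much is
 * hashed now and stores in *later what is carried over to the next
 * update/final/finup.
 */
static unsigned int __maybe_unused split_for_hash(unsigned int nbuf,
						  unsigned int nbytes,
						  unsigned int blocksize,
						  bool last,
						  unsigned int *later)
{
	unsigned int to_hash = nbuf + nbytes;

	if (!last && to_hash <= blocksize) {
		*later = to_hash;	/* not a full block yet: buffer it all */
		return 0;
	}

	if (last)
		*later = 0;		/* hash everything, h/w pads */
	else if (to_hash & (blocksize - 1))
		*later = to_hash & (blocksize - 1); /* keep the partial block */
	else
		*later = blocksize;	/* keep one whole block back */

	return to_hash - *later;
}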
1941
1942static int ahash_update(struct ahash_request *areq)
1943{
1944 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1945
1946 req_ctx->last = 0;
1947
1948 return ahash_process_req(areq, areq->nbytes);
1949}
1950
1951static int ahash_final(struct ahash_request *areq)
1952{
1953 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1954
1955 req_ctx->last = 1;
1956
1957 return ahash_process_req(areq, 0);
1958}
1959
1960static int ahash_finup(struct ahash_request *areq)
1961{
1962 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1963
1964 req_ctx->last = 1;
1965
1966 return ahash_process_req(areq, areq->nbytes);
1967}
1968
1969static int ahash_digest(struct ahash_request *areq)
1970{
1971 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1972 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1973
60f208d7 1974 ahash->init(areq);
497f2e6b
LN
1975 req_ctx->last = 1;
1976
1977 return ahash_process_req(areq, areq->nbytes);
1978}
1979
3639ca84
HG
1980static int ahash_export(struct ahash_request *areq, void *out)
1981{
1982 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1983 struct talitos_export_state *export = out;
1984
1985 memcpy(export->hw_context, req_ctx->hw_context,
1986 req_ctx->hw_context_size);
1987 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
1988 export->swinit = req_ctx->swinit;
1989 export->first = req_ctx->first;
1990 export->last = req_ctx->last;
1991 export->to_hash_later = req_ctx->to_hash_later;
1992 export->nbuf = req_ctx->nbuf;
1993
1994 return 0;
1995}
1996
1997static int ahash_import(struct ahash_request *areq, const void *in)
1998{
1999 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2000 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2001 const struct talitos_export_state *export = in;
2002
2003 memset(req_ctx, 0, sizeof(*req_ctx));
2004 req_ctx->hw_context_size =
2005 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2006 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2007 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2008 memcpy(req_ctx->hw_context, export->hw_context,
2009 req_ctx->hw_context_size);
2010 memcpy(req_ctx->buf, export->buf, export->nbuf);
2011 req_ctx->swinit = export->swinit;
2012 req_ctx->first = export->first;
2013 req_ctx->last = export->last;
2014 req_ctx->to_hash_later = export->to_hash_later;
2015 req_ctx->nbuf = export->nbuf;
2016
2017 return 0;
2018}
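
/*
 * Illustrative sketch, not part of the driver: how a caller uses the
 * export/import pair registered below to checkpoint a partial hash on one
 * request and resume it on another.  Async completion handling is
 * omitted; 'state' must hold crypto_ahash_statesize() bytes, i.e.
 * sizeof(struct talitos_export_state) for these algorithms.
 */
static int __maybe_unused hash_checkpoint_resume(struct ahash_request *req1,
						 struct ahash_request *req2,
						 void *state)
{
	int ret;

	ret = crypto_ahash_export(req1, state);	/* snapshot after update(s) */
	if (ret)
		return ret;

	return crypto_ahash_import(req2, state); /* continue hashing on req2 */
}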
2019
79b3a418
LN
2020struct keyhash_result {
2021 struct completion completion;
2022 int err;
2023};
2024
2025static void keyhash_complete(struct crypto_async_request *req, int err)
2026{
2027 struct keyhash_result *res = req->data;
2028
2029 if (err == -EINPROGRESS)
2030 return;
2031
2032 res->err = err;
2033 complete(&res->completion);
2034}
2035
2036static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2037 u8 *hash)
2038{
2039 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2040
2041 struct scatterlist sg[1];
2042 struct ahash_request *req;
2043 struct keyhash_result hresult;
2044 int ret;
2045
2046 init_completion(&hresult.completion);
2047
2048 req = ahash_request_alloc(tfm, GFP_KERNEL);
2049 if (!req)
2050 return -ENOMEM;
2051
2052 /* Keep tfm keylen == 0 during hash of the long key */
2053 ctx->keylen = 0;
2054 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2055 keyhash_complete, &hresult);
2056
2057 sg_init_one(&sg[0], key, keylen);
2058
2059 ahash_request_set_crypt(req, sg, hash, keylen);
2060 ret = crypto_ahash_digest(req);
2061 switch (ret) {
2062 case 0:
2063 break;
2064 case -EINPROGRESS:
2065 case -EBUSY:
2066 ret = wait_for_completion_interruptible(
2067 &hresult.completion);
2068 if (!ret)
2069 ret = hresult.err;
2070 break;
2071 default:
2072 break;
2073 }
2074 ahash_request_free(req);
2075
2076 return ret;
2077}
2078
2079static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2080 unsigned int keylen)
2081{
2082 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2083 unsigned int blocksize =
2084 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2085 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2086 unsigned int keysize = keylen;
2087 u8 hash[SHA512_DIGEST_SIZE];
2088 int ret;
2089
2090 if (keylen <= blocksize)
2091 memcpy(ctx->key, key, keysize);
2092 else {
2093 /* Must get the hash of the long key */
2094 ret = keyhash(tfm, key, keylen, hash);
2095
2096 if (ret) {
2097 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2098 return -EINVAL;
2099 }
2100
2101 keysize = digestsize;
2102 memcpy(ctx->key, hash, digestsize);
2103 }
2104
2105 ctx->keylen = keysize;
2106
2107 return 0;
2108}
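
/*
 * Illustrative sketch, not part of the driver: ahash_setkey() above
 * applies the usual HMAC rule - a key longer than the block size is first
 * hashed down to the digest size.  For hmac(sha256) (blocksize 64,
 * digestsize 32) a 100-byte key therefore ends up as a 32-byte ctx->key.
 */
static unsigned int __maybe_unused effective_hmac_keylen(unsigned int keylen,
							 unsigned int blocksize,
							 unsigned int digestsize)
{
	/* keys longer than one block are replaced by their own digest */
	return keylen <= blocksize ? keylen : digestsize;
}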
2109
2110
9c4a7965 2111struct talitos_alg_template {
d5e4aaef
LN
2112 u32 type;
2113 union {
2114 struct crypto_alg crypto;
acbf7c62 2115 struct ahash_alg hash;
aeb4c132 2116 struct aead_alg aead;
d5e4aaef 2117 } alg;
9c4a7965
KP
2118 __be32 desc_hdr_template;
2119};
2120
2121static struct talitos_alg_template driver_algs[] = {
991155ba 2122 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
d5e4aaef 2123 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2124 .alg.aead = {
2125 .base = {
2126 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2127 .cra_driver_name = "authenc-hmac-sha1-"
2128 "cbc-aes-talitos",
2129 .cra_blocksize = AES_BLOCK_SIZE,
2130 .cra_flags = CRYPTO_ALG_ASYNC,
2131 },
2132 .ivsize = AES_BLOCK_SIZE,
2133 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2134 },
9c4a7965
KP
2135 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2136 DESC_HDR_SEL0_AESU |
2137 DESC_HDR_MODE0_AESU_CBC |
2138 DESC_HDR_SEL1_MDEUA |
2139 DESC_HDR_MODE1_MDEU_INIT |
2140 DESC_HDR_MODE1_MDEU_PAD |
2141 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
70bcaca7 2142 },
d5e4aaef 2143 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2144 .alg.aead = {
2145 .base = {
2146 .cra_name = "authenc(hmac(sha1),"
2147 "cbc(des3_ede))",
2148 .cra_driver_name = "authenc-hmac-sha1-"
2149 "cbc-3des-talitos",
2150 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2151 .cra_flags = CRYPTO_ALG_ASYNC,
2152 },
2153 .ivsize = DES3_EDE_BLOCK_SIZE,
2154 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2155 },
70bcaca7
LN
2156 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2157 DESC_HDR_SEL0_DEU |
2158 DESC_HDR_MODE0_DEU_CBC |
2159 DESC_HDR_MODE0_DEU_3DES |
2160 DESC_HDR_SEL1_MDEUA |
2161 DESC_HDR_MODE1_MDEU_INIT |
2162 DESC_HDR_MODE1_MDEU_PAD |
2163 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
3952f17e 2164 },
357fb605 2165 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2166 .alg.aead = {
2167 .base = {
2168 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2169 .cra_driver_name = "authenc-hmac-sha224-"
2170 "cbc-aes-talitos",
2171 .cra_blocksize = AES_BLOCK_SIZE,
2172 .cra_flags = CRYPTO_ALG_ASYNC,
2173 },
2174 .ivsize = AES_BLOCK_SIZE,
2175 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2176 },
2177 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2178 DESC_HDR_SEL0_AESU |
2179 DESC_HDR_MODE0_AESU_CBC |
2180 DESC_HDR_SEL1_MDEUA |
2181 DESC_HDR_MODE1_MDEU_INIT |
2182 DESC_HDR_MODE1_MDEU_PAD |
2183 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2184 },
2185 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2186 .alg.aead = {
2187 .base = {
2188 .cra_name = "authenc(hmac(sha224),"
2189 "cbc(des3_ede))",
2190 .cra_driver_name = "authenc-hmac-sha224-"
2191 "cbc-3des-talitos",
2192 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2193 .cra_flags = CRYPTO_ALG_ASYNC,
2194 },
2195 .ivsize = DES3_EDE_BLOCK_SIZE,
2196 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2197 },
2198 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2199 DESC_HDR_SEL0_DEU |
2200 DESC_HDR_MODE0_DEU_CBC |
2201 DESC_HDR_MODE0_DEU_3DES |
2202 DESC_HDR_SEL1_MDEUA |
2203 DESC_HDR_MODE1_MDEU_INIT |
2204 DESC_HDR_MODE1_MDEU_PAD |
2205 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2206 },
d5e4aaef 2207 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2208 .alg.aead = {
2209 .base = {
2210 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2211 .cra_driver_name = "authenc-hmac-sha256-"
2212 "cbc-aes-talitos",
2213 .cra_blocksize = AES_BLOCK_SIZE,
2214 .cra_flags = CRYPTO_ALG_ASYNC,
2215 },
2216 .ivsize = AES_BLOCK_SIZE,
2217 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2218 },
3952f17e
LN
2219 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2220 DESC_HDR_SEL0_AESU |
2221 DESC_HDR_MODE0_AESU_CBC |
2222 DESC_HDR_SEL1_MDEUA |
2223 DESC_HDR_MODE1_MDEU_INIT |
2224 DESC_HDR_MODE1_MDEU_PAD |
2225 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2226 },
d5e4aaef 2227 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2228 .alg.aead = {
2229 .base = {
2230 .cra_name = "authenc(hmac(sha256),"
2231 "cbc(des3_ede))",
2232 .cra_driver_name = "authenc-hmac-sha256-"
2233 "cbc-3des-talitos",
2234 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2235 .cra_flags = CRYPTO_ALG_ASYNC,
2236 },
2237 .ivsize = DES3_EDE_BLOCK_SIZE,
2238 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2239 },
3952f17e
LN
2240 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2241 DESC_HDR_SEL0_DEU |
2242 DESC_HDR_MODE0_DEU_CBC |
2243 DESC_HDR_MODE0_DEU_3DES |
2244 DESC_HDR_SEL1_MDEUA |
2245 DESC_HDR_MODE1_MDEU_INIT |
2246 DESC_HDR_MODE1_MDEU_PAD |
2247 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2248 },
d5e4aaef 2249 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2250 .alg.aead = {
2251 .base = {
2252 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2253 .cra_driver_name = "authenc-hmac-sha384-"
2254 "cbc-aes-talitos",
2255 .cra_blocksize = AES_BLOCK_SIZE,
2256 .cra_flags = CRYPTO_ALG_ASYNC,
2257 },
2258 .ivsize = AES_BLOCK_SIZE,
2259 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2260 },
2261 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2262 DESC_HDR_SEL0_AESU |
2263 DESC_HDR_MODE0_AESU_CBC |
2264 DESC_HDR_SEL1_MDEUB |
2265 DESC_HDR_MODE1_MDEU_INIT |
2266 DESC_HDR_MODE1_MDEU_PAD |
2267 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2268 },
2269 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2270 .alg.aead = {
2271 .base = {
2272 .cra_name = "authenc(hmac(sha384),"
2273 "cbc(des3_ede))",
2274 .cra_driver_name = "authenc-hmac-sha384-"
2275 "cbc-3des-talitos",
2276 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2277 .cra_flags = CRYPTO_ALG_ASYNC,
2278 },
2279 .ivsize = DES3_EDE_BLOCK_SIZE,
2280 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2281 },
2282 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2283 DESC_HDR_SEL0_DEU |
2284 DESC_HDR_MODE0_DEU_CBC |
2285 DESC_HDR_MODE0_DEU_3DES |
2286 DESC_HDR_SEL1_MDEUB |
2287 DESC_HDR_MODE1_MDEU_INIT |
2288 DESC_HDR_MODE1_MDEU_PAD |
2289 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2290 },
2291 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2292 .alg.aead = {
2293 .base = {
2294 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2295 .cra_driver_name = "authenc-hmac-sha512-"
2296 "cbc-aes-talitos",
2297 .cra_blocksize = AES_BLOCK_SIZE,
2298 .cra_flags = CRYPTO_ALG_ASYNC,
2299 },
2300 .ivsize = AES_BLOCK_SIZE,
2301 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2302 },
2303 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2304 DESC_HDR_SEL0_AESU |
2305 DESC_HDR_MODE0_AESU_CBC |
2306 DESC_HDR_SEL1_MDEUB |
2307 DESC_HDR_MODE1_MDEU_INIT |
2308 DESC_HDR_MODE1_MDEU_PAD |
2309 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2310 },
2311 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2312 .alg.aead = {
2313 .base = {
2314 .cra_name = "authenc(hmac(sha512),"
2315 "cbc(des3_ede))",
2316 .cra_driver_name = "authenc-hmac-sha512-"
2317 "cbc-3des-talitos",
2318 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2319 .cra_flags = CRYPTO_ALG_ASYNC,
2320 },
2321 .ivsize = DES3_EDE_BLOCK_SIZE,
2322 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2323 },
2324 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2325 DESC_HDR_SEL0_DEU |
2326 DESC_HDR_MODE0_DEU_CBC |
2327 DESC_HDR_MODE0_DEU_3DES |
2328 DESC_HDR_SEL1_MDEUB |
2329 DESC_HDR_MODE1_MDEU_INIT |
2330 DESC_HDR_MODE1_MDEU_PAD |
2331 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2332 },
2333 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2334 .alg.aead = {
2335 .base = {
2336 .cra_name = "authenc(hmac(md5),cbc(aes))",
2337 .cra_driver_name = "authenc-hmac-md5-"
2338 "cbc-aes-talitos",
2339 .cra_blocksize = AES_BLOCK_SIZE,
2340 .cra_flags = CRYPTO_ALG_ASYNC,
2341 },
2342 .ivsize = AES_BLOCK_SIZE,
2343 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2344 },
3952f17e
LN
2345 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2346 DESC_HDR_SEL0_AESU |
2347 DESC_HDR_MODE0_AESU_CBC |
2348 DESC_HDR_SEL1_MDEUA |
2349 DESC_HDR_MODE1_MDEU_INIT |
2350 DESC_HDR_MODE1_MDEU_PAD |
2351 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2352 },
d5e4aaef 2353 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2354 .alg.aead = {
2355 .base = {
2356 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2357 .cra_driver_name = "authenc-hmac-md5-"
2358 "cbc-3des-talitos",
2359 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2360 .cra_flags = CRYPTO_ALG_ASYNC,
2361 },
2362 .ivsize = DES3_EDE_BLOCK_SIZE,
2363 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2364 },
3952f17e
LN
2365 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2366 DESC_HDR_SEL0_DEU |
2367 DESC_HDR_MODE0_DEU_CBC |
2368 DESC_HDR_MODE0_DEU_3DES |
2369 DESC_HDR_SEL1_MDEUA |
2370 DESC_HDR_MODE1_MDEU_INIT |
2371 DESC_HDR_MODE1_MDEU_PAD |
2372 DESC_HDR_MODE1_MDEU_MD5_HMAC,
4de9d0b5
LN
2373 },
2374 /* ABLKCIPHER algorithms. */
5e75ae1b
LC
2375 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2376 .alg.crypto = {
2377 .cra_name = "ecb(aes)",
2378 .cra_driver_name = "ecb-aes-talitos",
2379 .cra_blocksize = AES_BLOCK_SIZE,
2380 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2381 CRYPTO_ALG_ASYNC,
2382 .cra_ablkcipher = {
2383 .min_keysize = AES_MIN_KEY_SIZE,
2384 .max_keysize = AES_MAX_KEY_SIZE,
2385 .ivsize = AES_BLOCK_SIZE,
2386 }
2387 },
2388 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2389 DESC_HDR_SEL0_AESU,
2390 },
d5e4aaef
LN
2391 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2392 .alg.crypto = {
4de9d0b5
LN
2393 .cra_name = "cbc(aes)",
2394 .cra_driver_name = "cbc-aes-talitos",
2395 .cra_blocksize = AES_BLOCK_SIZE,
2396 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2397 CRYPTO_ALG_ASYNC,
4de9d0b5 2398 .cra_ablkcipher = {
4de9d0b5
LN
2399 .min_keysize = AES_MIN_KEY_SIZE,
2400 .max_keysize = AES_MAX_KEY_SIZE,
2401 .ivsize = AES_BLOCK_SIZE,
2402 }
2403 },
2404 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2405 DESC_HDR_SEL0_AESU |
2406 DESC_HDR_MODE0_AESU_CBC,
2407 },
5e75ae1b
LC
2408 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2409 .alg.crypto = {
2410 .cra_name = "ctr(aes)",
2411 .cra_driver_name = "ctr-aes-talitos",
2412 .cra_blocksize = AES_BLOCK_SIZE,
2413 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2414 CRYPTO_ALG_ASYNC,
2415 .cra_ablkcipher = {
2416 .min_keysize = AES_MIN_KEY_SIZE,
2417 .max_keysize = AES_MAX_KEY_SIZE,
2418 .ivsize = AES_BLOCK_SIZE,
2419 }
2420 },
2421 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2422 DESC_HDR_SEL0_AESU |
2423 DESC_HDR_MODE0_AESU_CTR,
2424 },
2425 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2426 .alg.crypto = {
2427 .cra_name = "ecb(des)",
2428 .cra_driver_name = "ecb-des-talitos",
2429 .cra_blocksize = DES_BLOCK_SIZE,
2430 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2431 CRYPTO_ALG_ASYNC,
2432 .cra_ablkcipher = {
2433 .min_keysize = DES_KEY_SIZE,
2434 .max_keysize = DES_KEY_SIZE,
2435 .ivsize = DES_BLOCK_SIZE,
2436 }
2437 },
2438 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2439 DESC_HDR_SEL0_DEU,
2440 },
2441 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2442 .alg.crypto = {
2443 .cra_name = "cbc(des)",
2444 .cra_driver_name = "cbc-des-talitos",
2445 .cra_blocksize = DES_BLOCK_SIZE,
2446 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2447 CRYPTO_ALG_ASYNC,
2448 .cra_ablkcipher = {
2449 .min_keysize = DES_KEY_SIZE,
2450 .max_keysize = DES_KEY_SIZE,
2451 .ivsize = DES_BLOCK_SIZE,
2452 }
2453 },
2454 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2455 DESC_HDR_SEL0_DEU |
2456 DESC_HDR_MODE0_DEU_CBC,
2457 },
2458 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2459 .alg.crypto = {
2460 .cra_name = "ecb(des3_ede)",
2461 .cra_driver_name = "ecb-3des-talitos",
2462 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2463 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2464 CRYPTO_ALG_ASYNC,
2465 .cra_ablkcipher = {
2466 .min_keysize = DES3_EDE_KEY_SIZE,
2467 .max_keysize = DES3_EDE_KEY_SIZE,
2468 .ivsize = DES3_EDE_BLOCK_SIZE,
2469 }
2470 },
2471 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2472 DESC_HDR_SEL0_DEU |
2473 DESC_HDR_MODE0_DEU_3DES,
2474 },
d5e4aaef
LN
2475 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2476 .alg.crypto = {
4de9d0b5
LN
2477 .cra_name = "cbc(des3_ede)",
2478 .cra_driver_name = "cbc-3des-talitos",
2479 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2480 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2481 CRYPTO_ALG_ASYNC,
4de9d0b5 2482 .cra_ablkcipher = {
4de9d0b5
LN
2483 .min_keysize = DES3_EDE_KEY_SIZE,
2484 .max_keysize = DES3_EDE_KEY_SIZE,
2485 .ivsize = DES3_EDE_BLOCK_SIZE,
2486 }
2487 },
2488 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2489 DESC_HDR_SEL0_DEU |
2490 DESC_HDR_MODE0_DEU_CBC |
2491 DESC_HDR_MODE0_DEU_3DES,
497f2e6b
LN
2492 },
2493 /* AHASH algorithms. */
2494 { .type = CRYPTO_ALG_TYPE_AHASH,
2495 .alg.hash = {
497f2e6b 2496 .halg.digestsize = MD5_DIGEST_SIZE,
3639ca84 2497 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2498 .halg.base = {
2499 .cra_name = "md5",
2500 .cra_driver_name = "md5-talitos",
b3988618 2501 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
497f2e6b
LN
2502 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2503 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2504 }
2505 },
2506 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2507 DESC_HDR_SEL0_MDEUA |
2508 DESC_HDR_MODE0_MDEU_MD5,
2509 },
2510 { .type = CRYPTO_ALG_TYPE_AHASH,
2511 .alg.hash = {
497f2e6b 2512 .halg.digestsize = SHA1_DIGEST_SIZE,
3639ca84 2513 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2514 .halg.base = {
2515 .cra_name = "sha1",
2516 .cra_driver_name = "sha1-talitos",
2517 .cra_blocksize = SHA1_BLOCK_SIZE,
2518 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2519 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2520 }
2521 },
2522 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2523 DESC_HDR_SEL0_MDEUA |
2524 DESC_HDR_MODE0_MDEU_SHA1,
2525 },
60f208d7
KP
2526 { .type = CRYPTO_ALG_TYPE_AHASH,
2527 .alg.hash = {
60f208d7 2528 .halg.digestsize = SHA224_DIGEST_SIZE,
3639ca84 2529 .halg.statesize = sizeof(struct talitos_export_state),
60f208d7
KP
2530 .halg.base = {
2531 .cra_name = "sha224",
2532 .cra_driver_name = "sha224-talitos",
2533 .cra_blocksize = SHA224_BLOCK_SIZE,
2534 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2535 CRYPTO_ALG_ASYNC,
60f208d7
KP
2536 }
2537 },
2538 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2539 DESC_HDR_SEL0_MDEUA |
2540 DESC_HDR_MODE0_MDEU_SHA224,
2541 },
497f2e6b
LN
2542 { .type = CRYPTO_ALG_TYPE_AHASH,
2543 .alg.hash = {
497f2e6b 2544 .halg.digestsize = SHA256_DIGEST_SIZE,
3639ca84 2545 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2546 .halg.base = {
2547 .cra_name = "sha256",
2548 .cra_driver_name = "sha256-talitos",
2549 .cra_blocksize = SHA256_BLOCK_SIZE,
2550 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2551 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2552 }
2553 },
2554 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2555 DESC_HDR_SEL0_MDEUA |
2556 DESC_HDR_MODE0_MDEU_SHA256,
2557 },
2558 { .type = CRYPTO_ALG_TYPE_AHASH,
2559 .alg.hash = {
497f2e6b 2560 .halg.digestsize = SHA384_DIGEST_SIZE,
3639ca84 2561 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2562 .halg.base = {
2563 .cra_name = "sha384",
2564 .cra_driver_name = "sha384-talitos",
2565 .cra_blocksize = SHA384_BLOCK_SIZE,
2566 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2567 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2568 }
2569 },
2570 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2571 DESC_HDR_SEL0_MDEUB |
2572 DESC_HDR_MODE0_MDEUB_SHA384,
2573 },
2574 { .type = CRYPTO_ALG_TYPE_AHASH,
2575 .alg.hash = {
497f2e6b 2576 .halg.digestsize = SHA512_DIGEST_SIZE,
3639ca84 2577 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2578 .halg.base = {
2579 .cra_name = "sha512",
2580 .cra_driver_name = "sha512-talitos",
2581 .cra_blocksize = SHA512_BLOCK_SIZE,
2582 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2583 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2584 }
2585 },
2586 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2587 DESC_HDR_SEL0_MDEUB |
2588 DESC_HDR_MODE0_MDEUB_SHA512,
2589 },
79b3a418
LN
2590 { .type = CRYPTO_ALG_TYPE_AHASH,
2591 .alg.hash = {
79b3a418 2592 .halg.digestsize = MD5_DIGEST_SIZE,
3639ca84 2593 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2594 .halg.base = {
2595 .cra_name = "hmac(md5)",
2596 .cra_driver_name = "hmac-md5-talitos",
b3988618 2597 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
79b3a418
LN
2598 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2599 CRYPTO_ALG_ASYNC,
79b3a418
LN
2600 }
2601 },
2602 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2603 DESC_HDR_SEL0_MDEUA |
2604 DESC_HDR_MODE0_MDEU_MD5,
2605 },
2606 { .type = CRYPTO_ALG_TYPE_AHASH,
2607 .alg.hash = {
79b3a418 2608 .halg.digestsize = SHA1_DIGEST_SIZE,
3639ca84 2609 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2610 .halg.base = {
2611 .cra_name = "hmac(sha1)",
2612 .cra_driver_name = "hmac-sha1-talitos",
2613 .cra_blocksize = SHA1_BLOCK_SIZE,
2614 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2615 CRYPTO_ALG_ASYNC,
79b3a418
LN
2616 }
2617 },
2618 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2619 DESC_HDR_SEL0_MDEUA |
2620 DESC_HDR_MODE0_MDEU_SHA1,
2621 },
2622 { .type = CRYPTO_ALG_TYPE_AHASH,
2623 .alg.hash = {
79b3a418 2624 .halg.digestsize = SHA224_DIGEST_SIZE,
3639ca84 2625 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2626 .halg.base = {
2627 .cra_name = "hmac(sha224)",
2628 .cra_driver_name = "hmac-sha224-talitos",
2629 .cra_blocksize = SHA224_BLOCK_SIZE,
2630 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2631 CRYPTO_ALG_ASYNC,
79b3a418
LN
2632 }
2633 },
2634 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2635 DESC_HDR_SEL0_MDEUA |
2636 DESC_HDR_MODE0_MDEU_SHA224,
2637 },
2638 { .type = CRYPTO_ALG_TYPE_AHASH,
2639 .alg.hash = {
79b3a418 2640 .halg.digestsize = SHA256_DIGEST_SIZE,
3639ca84 2641 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2642 .halg.base = {
2643 .cra_name = "hmac(sha256)",
2644 .cra_driver_name = "hmac-sha256-talitos",
2645 .cra_blocksize = SHA256_BLOCK_SIZE,
2646 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2647 CRYPTO_ALG_ASYNC,
79b3a418
LN
2648 }
2649 },
2650 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2651 DESC_HDR_SEL0_MDEUA |
2652 DESC_HDR_MODE0_MDEU_SHA256,
2653 },
2654 { .type = CRYPTO_ALG_TYPE_AHASH,
2655 .alg.hash = {
79b3a418 2656 .halg.digestsize = SHA384_DIGEST_SIZE,
3639ca84 2657 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2658 .halg.base = {
2659 .cra_name = "hmac(sha384)",
2660 .cra_driver_name = "hmac-sha384-talitos",
2661 .cra_blocksize = SHA384_BLOCK_SIZE,
2662 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2663 CRYPTO_ALG_ASYNC,
79b3a418
LN
2664 }
2665 },
2666 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2667 DESC_HDR_SEL0_MDEUB |
2668 DESC_HDR_MODE0_MDEUB_SHA384,
2669 },
2670 { .type = CRYPTO_ALG_TYPE_AHASH,
2671 .alg.hash = {
79b3a418 2672 .halg.digestsize = SHA512_DIGEST_SIZE,
3639ca84 2673 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2674 .halg.base = {
2675 .cra_name = "hmac(sha512)",
2676 .cra_driver_name = "hmac-sha512-talitos",
2677 .cra_blocksize = SHA512_BLOCK_SIZE,
2678 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2679 CRYPTO_ALG_ASYNC,
79b3a418
LN
2680 }
2681 },
2682 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2683 DESC_HDR_SEL0_MDEUB |
2684 DESC_HDR_MODE0_MDEUB_SHA512,
2685 }
9c4a7965
KP
2686};
2687
2688struct talitos_crypto_alg {
2689 struct list_head entry;
2690 struct device *dev;
acbf7c62 2691 struct talitos_alg_template algt;
9c4a7965
KP
2692};
2693
2694static int talitos_cra_init(struct crypto_tfm *tfm)
2695{
2696 struct crypto_alg *alg = tfm->__crt_alg;
19bbbc63 2697 struct talitos_crypto_alg *talitos_alg;
9c4a7965 2698 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
5228f0f7 2699 struct talitos_private *priv;
9c4a7965 2700
497f2e6b
LN
2701 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2702 talitos_alg = container_of(__crypto_ahash_alg(alg),
2703 struct talitos_crypto_alg,
2704 algt.alg.hash);
2705 else
2706 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2707 algt.alg.crypto);
19bbbc63 2708
9c4a7965
KP
2709 /* update context with ptr to dev */
2710 ctx->dev = talitos_alg->dev;
19bbbc63 2711
5228f0f7
KP
2712 /* assign SEC channel to tfm in round-robin fashion */
2713 priv = dev_get_drvdata(ctx->dev);
2714 ctx->ch = atomic_inc_return(&priv->last_chan) &
2715 (priv->num_channels - 1);
2716
9c4a7965 2717 /* copy descriptor header template value */
acbf7c62 2718 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2719
602dba5a
KP
2720 /* select done notification */
2721 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2722
497f2e6b
LN
2723 return 0;
2724}
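
/*
 * Illustrative sketch, not part of the driver: the channel assignment in
 * talitos_cra_init() above is a plain round-robin.  It relies on
 * num_channels being a power of two (talitos_probe() rejects anything
 * else), so the AND works as a cheap modulo; with 4 channels the counter
 * 1, 2, 3, 4, 5, ... maps to channels 1, 2, 3, 0, 1, ...
 */
static int __maybe_unused next_chan(atomic_t *last_chan, int num_channels)
{
	return atomic_inc_return(last_chan) & (num_channels - 1);
}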
2725
aeb4c132 2726static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2727{
aeb4c132 2728 talitos_cra_init(crypto_aead_tfm(tfm));
9c4a7965
KP
2729 return 0;
2730}
2731
497f2e6b
LN
2732static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2733{
2734 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2735
2736 talitos_cra_init(tfm);
2737
2738 ctx->keylen = 0;
2739 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2740 sizeof(struct talitos_ahash_req_ctx));
2741
2742 return 0;
2743}
2744
9c4a7965
KP
2745/*
2746 * given the alg's descriptor header template, determine whether descriptor
2747 * type and primary/secondary execution units required match the hw
2748 * capabilities description provided in the device tree node.
2749 */
2750static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2751{
2752 struct talitos_private *priv = dev_get_drvdata(dev);
2753 int ret;
2754
2755 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2756 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2757
2758 if (SECONDARY_EU(desc_hdr_template))
2759 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2760 & priv->exec_units);
2761
2762 return ret;
2763}
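
/*
 * Illustrative sketch, not part of the driver: what hw_supports() above
 * ends up checking for the "authenc(hmac(sha1),cbc(aes))" template.  Its
 * header selects the IPSEC_ESP descriptor type, the AESU as primary
 * execution unit and MDEU-A as secondary, so the device tree's
 * fsl,descriptor-types-mask and fsl,exec-units-mask must carry the
 * matching bits or the algorithm is skipped at probe time.
 */
static int __maybe_unused hw_supports_aes_sha1_example(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	__be32 hdr = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU |
		     DESC_HDR_SEL1_MDEUA;

	return (1 << DESC_TYPE(hdr) & priv->desc_types) &&
	       (1 << PRIMARY_EU(hdr) & priv->exec_units) &&
	       (1 << SECONDARY_EU(hdr) & priv->exec_units);
}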
2764
2dc11581 2765static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2766{
2767 struct device *dev = &ofdev->dev;
2768 struct talitos_private *priv = dev_get_drvdata(dev);
2769 struct talitos_crypto_alg *t_alg, *n;
2770 int i;
2771
2772 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2773 switch (t_alg->algt.type) {
2774 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62 2775 break;
aeb4c132
HX
2776 case CRYPTO_ALG_TYPE_AEAD:
2777 crypto_unregister_aead(&t_alg->algt.alg.aead);
 break; /* don't fall through and unregister the ahash union member too */
acbf7c62
LN
2778 case CRYPTO_ALG_TYPE_AHASH:
2779 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2780 break;
2781 }
9c4a7965
KP
2782 list_del(&t_alg->entry);
2783 kfree(t_alg);
2784 }
2785
2786 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2787 talitos_unregister_rng(dev);
2788
35a3bb3d 2789 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2790 kfree(priv->chan[i].fifo);
9c4a7965 2791
4b992628 2792 kfree(priv->chan);
9c4a7965 2793
c3e337f8 2794 for (i = 0; i < 2; i++)
2cdba3cf 2795 if (priv->irq[i]) {
c3e337f8
KP
2796 free_irq(priv->irq[i], dev);
2797 irq_dispose_mapping(priv->irq[i]);
2798 }
9c4a7965 2799
c3e337f8 2800 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2801 if (priv->irq[1])
c3e337f8 2802 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2803
2804 iounmap(priv->reg);
2805
9c4a7965
KP
2806 kfree(priv);
2807
2808 return 0;
2809}
2810
2811static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2812 struct talitos_alg_template
2813 *template)
2814{
60f208d7 2815 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2816 struct talitos_crypto_alg *t_alg;
2817 struct crypto_alg *alg;
2818
2819 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2820 if (!t_alg)
2821 return ERR_PTR(-ENOMEM);
2822
acbf7c62
LN
2823 t_alg->algt = *template;
2824
2825 switch (t_alg->algt.type) {
2826 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2827 alg = &t_alg->algt.alg.crypto;
2828 alg->cra_init = talitos_cra_init;
d4cd3283 2829 alg->cra_type = &crypto_ablkcipher_type;
b286e003
KP
2830 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2831 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2832 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2833 alg->cra_ablkcipher.geniv = "eseqiv";
497f2e6b 2834 break;
acbf7c62 2835 case CRYPTO_ALG_TYPE_AEAD:
aeb4c132 2836 alg = &t_alg->algt.alg.aead.base;
aeb4c132
HX
2837 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2838 t_alg->algt.alg.aead.setkey = aead_setkey;
2839 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2840 t_alg->algt.alg.aead.decrypt = aead_decrypt;
acbf7c62
LN
2841 break;
2842 case CRYPTO_ALG_TYPE_AHASH:
2843 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2844 alg->cra_init = talitos_cra_init_ahash;
d4cd3283 2845 alg->cra_type = &crypto_ahash_type;
b286e003
KP
2846 t_alg->algt.alg.hash.init = ahash_init;
2847 t_alg->algt.alg.hash.update = ahash_update;
2848 t_alg->algt.alg.hash.final = ahash_final;
2849 t_alg->algt.alg.hash.finup = ahash_finup;
2850 t_alg->algt.alg.hash.digest = ahash_digest;
2851 t_alg->algt.alg.hash.setkey = ahash_setkey;
3639ca84
HG
2852 t_alg->algt.alg.hash.import = ahash_import;
2853 t_alg->algt.alg.hash.export = ahash_export;
b286e003 2854
79b3a418 2855 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
0b2730d8
KP
2856 !strncmp(alg->cra_name, "hmac", 4)) {
2857 kfree(t_alg);
79b3a418 2858 return ERR_PTR(-ENOTSUPP);
0b2730d8 2859 }
60f208d7 2860 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
79b3a418
LN
2861 (!strcmp(alg->cra_name, "sha224") ||
2862 !strcmp(alg->cra_name, "hmac(sha224)"))) {
60f208d7
KP
2863 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2864 t_alg->algt.desc_hdr_template =
2865 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2866 DESC_HDR_SEL0_MDEUA |
2867 DESC_HDR_MODE0_MDEU_SHA256;
2868 }
497f2e6b 2869 break;
1d11911a
KP
2870 default:
2871 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
5fa7dadc 2872 kfree(t_alg);
1d11911a 2873 return ERR_PTR(-EINVAL);
acbf7c62 2874 }
9c4a7965 2875
9c4a7965 2876 alg->cra_module = THIS_MODULE;
9c4a7965 2877 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2878 alg->cra_alignmask = 0;
9c4a7965 2879 alg->cra_ctxsize = sizeof(struct talitos_ctx);
d912bb76 2880 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
9c4a7965 2881
9c4a7965
KP
2882 t_alg->dev = dev;
2883
2884 return t_alg;
2885}
2886
c3e337f8
KP
2887static int talitos_probe_irq(struct platform_device *ofdev)
2888{
2889 struct device *dev = &ofdev->dev;
2890 struct device_node *np = ofdev->dev.of_node;
2891 struct talitos_private *priv = dev_get_drvdata(dev);
2892 int err;
dd3c0987 2893 bool is_sec1 = has_ftr_sec1(priv);
c3e337f8
KP
2894
2895 priv->irq[0] = irq_of_parse_and_map(np, 0);
2cdba3cf 2896 if (!priv->irq[0]) {
c3e337f8
KP
2897 dev_err(dev, "failed to map irq\n");
2898 return -EINVAL;
2899 }
dd3c0987
LC
2900 if (is_sec1) {
2901 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2902 dev_driver_string(dev), dev);
2903 goto primary_out;
2904 }
c3e337f8
KP
2905
2906 priv->irq[1] = irq_of_parse_and_map(np, 1);
2907
2908 /* get the primary irq line */
2cdba3cf 2909 if (!priv->irq[1]) {
dd3c0987 2910 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
c3e337f8
KP
2911 dev_driver_string(dev), dev);
2912 goto primary_out;
2913 }
2914
dd3c0987 2915 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
c3e337f8
KP
2916 dev_driver_string(dev), dev);
2917 if (err)
2918 goto primary_out;
2919
2920 /* get the secondary irq line */
dd3c0987 2921 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
c3e337f8
KP
2922 dev_driver_string(dev), dev);
2923 if (err) {
2924 dev_err(dev, "failed to request secondary irq\n");
2925 irq_dispose_mapping(priv->irq[1]);
2cdba3cf 2926 priv->irq[1] = 0;
c3e337f8
KP
2927 }
2928
2929 return err;
2930
2931primary_out:
2932 if (err) {
2933 dev_err(dev, "failed to request primary irq\n");
2934 irq_dispose_mapping(priv->irq[0]);
2cdba3cf 2935 priv->irq[0] = 0;
c3e337f8
KP
2936 }
2937
2938 return err;
2939}
2940
1c48a5c9 2941static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2942{
2943 struct device *dev = &ofdev->dev;
61c7a080 2944 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2945 struct talitos_private *priv;
2946 const unsigned int *prop;
2947 int i, err;
5fa7fa14 2948 int stride;
9c4a7965
KP
2949
2950 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2951 if (!priv)
2952 return -ENOMEM;
2953
f3de9cb1
KH
2954 INIT_LIST_HEAD(&priv->alg_list);
2955
9c4a7965
KP
2956 dev_set_drvdata(dev, priv);
2957
2958 priv->ofdev = ofdev;
2959
511d63cb
HG
2960 spin_lock_init(&priv->reg_lock);
2961
9c4a7965
KP
2962 priv->reg = of_iomap(np, 0);
2963 if (!priv->reg) {
2964 dev_err(dev, "failed to of_iomap\n");
2965 err = -ENOMEM;
2966 goto err_out;
2967 }
2968
2969 /* get SEC version capabilities from device tree */
2970 prop = of_get_property(np, "fsl,num-channels", NULL);
2971 if (prop)
2972 priv->num_channels = *prop;
2973
2974 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2975 if (prop)
2976 priv->chfifo_len = *prop;
2977
2978 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2979 if (prop)
2980 priv->exec_units = *prop;
2981
2982 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2983 if (prop)
2984 priv->desc_types = *prop;
2985
2986 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2987 !priv->exec_units || !priv->desc_types) {
2988 dev_err(dev, "invalid property data in device tree node\n");
2989 err = -EINVAL;
2990 goto err_out;
2991 }
2992
f3c85bc1
LN
2993 if (of_device_is_compatible(np, "fsl,sec3.0"))
2994 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2995
fe5720e2 2996 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 2997 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
2998 TALITOS_FTR_SHA224_HWINIT |
2999 TALITOS_FTR_HMAC_OK;
fe5720e2 3000
21590888
LC
3001 if (of_device_is_compatible(np, "fsl,sec1.0"))
3002 priv->features |= TALITOS_FTR_SEC1;
3003
5fa7fa14
LC
3004 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3005 priv->reg_deu = priv->reg + TALITOS12_DEU;
3006 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3007 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3008 stride = TALITOS1_CH_STRIDE;
3009 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3010 priv->reg_deu = priv->reg + TALITOS10_DEU;
3011 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3012 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3013 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3014 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3015 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3016 stride = TALITOS1_CH_STRIDE;
3017 } else {
3018 priv->reg_deu = priv->reg + TALITOS2_DEU;
3019 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3020 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3021 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3022 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3023 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3024 priv->reg_keu = priv->reg + TALITOS2_KEU;
3025 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3026 stride = TALITOS2_CH_STRIDE;
3027 }
3028
dd3c0987
LC
3029 err = talitos_probe_irq(ofdev);
3030 if (err)
3031 goto err_out;
3032
3033 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3034 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3035 (unsigned long)dev);
3036 } else {
3037 if (!priv->irq[1]) {
3038 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3039 (unsigned long)dev);
3040 } else {
3041 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3042 (unsigned long)dev);
3043 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3044 (unsigned long)dev);
3045 }
3046 }
3047
4b992628
KP
3048 priv->chan = kzalloc(sizeof(struct talitos_channel) *
3049 priv->num_channels, GFP_KERNEL);
3050 if (!priv->chan) {
3051 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
3052 err = -ENOMEM;
3053 goto err_out;
3054 }
3055
f641dddd
MH
3056 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3057
c3e337f8 3058 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 3059 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 3060 if (!priv->irq[1] || !(i & 1))
c3e337f8 3061 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 3062
4b992628
KP
3063 spin_lock_init(&priv->chan[i].head_lock);
3064 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 3065
4b992628
KP
3066 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3067 priv->fifo_len, GFP_KERNEL);
3068 if (!priv->chan[i].fifo) {
9c4a7965
KP
3069 dev_err(dev, "failed to allocate request fifo %d\n", i);
3070 err = -ENOMEM;
3071 goto err_out;
3072 }
9c4a7965 3073
4b992628
KP
3074 atomic_set(&priv->chan[i].submit_count,
3075 -(priv->chfifo_len - 1));
f641dddd 3076 }
9c4a7965 3077
81eb024c
KP
3078 dma_set_mask(dev, DMA_BIT_MASK(36));
3079
9c4a7965
KP
3080 /* reset and initialize the h/w */
3081 err = init_device(dev);
3082 if (err) {
3083 dev_err(dev, "failed to initialize device\n");
3084 goto err_out;
3085 }
3086
3087 /* register the RNG, if available */
3088 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3089 err = talitos_register_rng(dev);
3090 if (err) {
3091 dev_err(dev, "failed to register hwrng: %d\n", err);
3092 goto err_out;
3093 } else
3094 dev_info(dev, "hwrng\n");
3095 }
3096
3097 /* register crypto algorithms the device supports */
9c4a7965
KP
3098 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3099 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3100 struct talitos_crypto_alg *t_alg;
aeb4c132 3101 struct crypto_alg *alg = NULL;
9c4a7965
KP
3102
3103 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3104 if (IS_ERR(t_alg)) {
3105 err = PTR_ERR(t_alg);
0b2730d8 3106 if (err == -ENOTSUPP)
79b3a418 3107 continue;
9c4a7965
KP
3108 goto err_out;
3109 }
3110
acbf7c62
LN
3111 switch (t_alg->algt.type) {
3112 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62
LN
3113 err = crypto_register_alg(
3114 &t_alg->algt.alg.crypto);
aeb4c132 3115 alg = &t_alg->algt.alg.crypto;
acbf7c62 3116 break;
aeb4c132
HX
3117
3118 case CRYPTO_ALG_TYPE_AEAD:
3119 err = crypto_register_aead(
3120 &t_alg->algt.alg.aead);
3121 alg = &t_alg->algt.alg.aead.base;
3122 break;
3123
acbf7c62
LN
3124 case CRYPTO_ALG_TYPE_AHASH:
3125 err = crypto_register_ahash(
3126 &t_alg->algt.alg.hash);
aeb4c132 3127 alg = &t_alg->algt.alg.hash.halg.base;
acbf7c62
LN
3128 break;
3129 }
9c4a7965
KP
3130 if (err) {
3131 dev_err(dev, "%s alg registration failed\n",
aeb4c132 3132 alg->cra_driver_name);
9c4a7965 3133 kfree(t_alg);
991155ba 3134 } else
9c4a7965 3135 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3136 }
3137 }
5b859b6e
KP
3138 if (!list_empty(&priv->alg_list))
3139 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3140 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3141
3142 return 0;
3143
3144err_out:
3145 talitos_remove(ofdev);
9c4a7965
KP
3146
3147 return err;
3148}
3149
6c3f975a 3150static const struct of_device_id talitos_match[] = {
0635b7db
LC
3151#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3152 {
3153 .compatible = "fsl,sec1.0",
3154 },
3155#endif
3156#ifdef CONFIG_CRYPTO_DEV_TALITOS2
9c4a7965
KP
3157 {
3158 .compatible = "fsl,sec2.0",
3159 },
0635b7db 3160#endif
9c4a7965
KP
3161 {},
3162};
3163MODULE_DEVICE_TABLE(of, talitos_match);
3164
1c48a5c9 3165static struct platform_driver talitos_driver = {
4018294b
GL
3166 .driver = {
3167 .name = "talitos",
4018294b
GL
3168 .of_match_table = talitos_match,
3169 },
9c4a7965 3170 .probe = talitos_probe,
596f1034 3171 .remove = talitos_remove,
9c4a7965
KP
3172};
3173
741e8c2d 3174module_platform_driver(talitos_driver);
9c4a7965
KP
3175
3176MODULE_LICENSE("GPL");
3177MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3178MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");