/*
 * Copyright (C) 2006-2009 DENX Software Engineering.
 *
 * Author: Yuri Tikhonov <yur@emcraft.com>
 *
 * Further porting to arch/powerpc by
 * 	Anatolij Gustschin <agust@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the AMCC PPC440SPe processors.
 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 * ADMA driver written by D. Williams.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
#include "../dmaengine.h"

enum ppc_adma_init_code {
	PPC_ADMA_INIT_OK = 0,
	PPC_ADMA_INIT_MEMRES,
	PPC_ADMA_INIT_MEMREG,
	PPC_ADMA_INIT_ALLOC,
	PPC_ADMA_INIT_COHERENT,
	PPC_ADMA_INIT_CHANNEL,
	PPC_ADMA_INIT_IRQ1,
	PPC_ADMA_INIT_IRQ2,
	PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
	[PPC_ADMA_INIT_OK] = "ok",
	[PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
	[PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
	[PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
				"structure",
	[PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
				   "hardware descriptors",
	[PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
	[PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
	[PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
	[PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};

static enum ppc_adma_init_code
ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
};

/* The list of channels exported by ppc440spe ADMA */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* This flag is set when we want to refetch the XOR chain in the
 * interrupt handler
 */
static u32 do_xor_refetch;

/* Pointer to DMA0, DMA1 CP/CS FIFO */
static void *ppc440spe_dma_fifo_buf;

/* Pointers to the last CDBs submitted to DMA0 and DMA1 */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Pointer to the last linked and submitted xor CB */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* This array is used in data-check operations for storing a pattern */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
 * the block size in transactions, we do not allow more than one RXOR
 * transaction to be active at a time. This variable stores whether RXOR is
 * currently active (PPC440SPE_RXOR_RUN bit is set) or not (bit is clear).
 */
static unsigned long ppc440spe_rxor_state;
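
/*
 * Illustrative sketch (an assumed caller pattern, not a definitive part
 * of this excerpt): code that starts an RXOR chain is expected to gate
 * on the flag atomically, e.g.:
 *
 *	if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
 *		ppc440spe_desc_set_rxor_block_size(len);
 *		... build and submit the RXOR chain ...
 *	}
 *
 * The bit is cleared again in ppc440spe_adma_device_clear_eot_status()
 * once a completed RXOR CDB is observed in the status FIFO.
 */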

/* These are used in enable & check routines
 */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_rxor *cursor, int index,
		int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
		struct ppc440spe_adma_desc_slot *desc,
		int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
		struct ppc440spe_adma_desc_slot *desc,
		int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif

static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
	struct dma_cdb *cdb;
	struct xor_cb *cb;
	int i;

	switch (chan->device->id) {
	case 0:
	case 1:
		cdb = block;

		pr_debug("CDB at %p [%d]:\n"
			 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
			 "\t sg1u 0x%08x sg1l 0x%08x\n"
			 "\t sg2u 0x%08x sg2l 0x%08x\n"
			 "\t sg3u 0x%08x sg3l 0x%08x\n",
			 cdb, chan->device->id,
			 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
			 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
			 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
			 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
		);
		break;
	case 2:
		cb = block;

		pr_debug("CB at %p [%d]:\n"
			 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
			 "\t cbtah 0x%08x cbtal 0x%08x\n"
			 "\t cblah 0x%08x cblal 0x%08x\n",
			 cb, chan->device->id,
			 cb->cbc, cb->cbbc, cb->cbs,
			 cb->cbtah, cb->cbtal,
			 cb->cblah, cb->cblal);
		for (i = 0; i < 16; i++) {
			if (i && !cb->ops[i].h && !cb->ops[i].l)
				continue;
			pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
				 i, cb->ops[i].h, cb->ops[i].l);
		}
		break;
	}
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
			  struct ppc440spe_adma_desc_slot *iter)
{
	for (; iter; iter = iter->hw_next)
		print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
			     unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
			    unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
				    unsigned int src_cnt,
				    const unsigned char *scf)
{
	int i;

	pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
	if (scf) {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
	} else {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(no) ", src[i]);
	}

	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/
/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
 * pseudo operation
 */
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	struct xor_cb *p;

	switch (chan->device->id) {
	case PPC440SPE_XOR_ID:
		p = desc->hw_desc;
		memset(desc->hw_desc, 0, sizeof(struct xor_cb));
		/* NOP with Command Block Complete Enable */
		p->cbc = XOR_CBCR_CBCE_BIT;
		break;
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
		/* NOP with interrupt */
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
		break;
	default:
		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
		       __func__);
		break;
	}
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
 * pseudo operation
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = 0;
	desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
				    int src_cnt, unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = 1;

	hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
 * operation in DMA2 controller
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
				       int dst_cnt, int src_cnt,
				       unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
	memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
	desc->descs_per_op = 0;

	hw_desc->cbc = XOR_CBCR_TGT_BIT;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)
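
/*
 * Illustrative note (a sketch of assumed usage, not an exported API):
 * these driver-private bits extend the generic dma_ctrl_flags space just
 * past DMA_PREP_FENCE, so a PQ prep path that must clear its destinations
 * first would compose the 'op' word as, e.g.:
 *
 *	op = flags | DMA_PREP_ZERO_P | DMA_PREP_ZERO_Q;
 *
 * They are later mirrored into the descriptor's PPC440SPE_ZERO_P/Q state
 * bits when the CDB chain is initialized below.
 */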

/**
 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
 * with DMA0/1
 */
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt, unsigned long flags,
				unsigned long op)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	u8 dopc;

	/* Common initialization of a PQ descriptors chain */
	set_bits(op, &desc->flags);
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;

	/* WXOR MULTICAST if both P and Q are being computed
	 * MV_SG1_SG2 if Q only
	 */
	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

	list_for_each_entry(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

		if (likely(!list_is_last(&iter->chain_node,
				&desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
				struct ppc440spe_adma_desc_slot, chain_node);
			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			if (flags & DMA_PREP_INTERRUPT)
				set_bit(PPC440SPE_DESC_INT, &iter->flags);
			else
				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}

	/* Set OPS depending on WXOR/RXOR type of operation */
	if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
		/* This is a WXOR only chain:
		 * - the first descriptors are for zeroing destinations
		 *   if PPC440SPE_ZERO_P/Q set;
		 * - the remaining descriptors are for GF-XOR operations.
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);

		if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = dopc;
		}
	} else {
		/* This is either RXOR-only or mixed RXOR/WXOR */

		/* The first 1 or 2 slots in the chain are always RXOR:
		 * if we need to calculate both P and Q, then there are two
		 * RXOR slots; if only P or only Q, then there is one
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		hw_desc = iter->hw_desc;
		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
		}

		/* The remaining descs (if any) are WXORs */
		if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			list_for_each_entry_from(iter, &desc->group_list,
						 chain_node) {
				hw_desc = iter->hw_desc;
				hw_desc->opc = dopc;
			}
		}
	}
}

/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
 * for PQ_ZERO_SUM operation
 */
static void ppc440spe_desc_init_dma01pqzero_sum(
				struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	int i = 0;
	u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
				   DMA_CDB_OPC_MV_SG1_SG2;
	/*
	 * Initialize starting from the 2nd or 3rd descriptor, depending
	 * on dst_cnt. The first one or two slots are for cloning P
	 * and/or Q to chan->pdest and/or chan->qdest as we have
	 * to preserve the original P/Q.
	 */
	iter = list_first_entry(&desc->group_list,
				struct ppc440spe_adma_desc_slot, chain_node);
	iter = list_entry(iter->chain_node.next,
			  struct ppc440spe_adma_desc_slot, chain_node);

	if (dst_cnt > 1) {
		iter = list_entry(iter->chain_node.next,
				  struct ppc440spe_adma_desc_slot, chain_node);
	}
	/* initialize each source descriptor in chain */
	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
		iter->src_cnt = 0;
		iter->dst_cnt = 0;

		/* This is a ZERO_SUM operation:
		 * - <src_cnt> descriptors starting from the 2nd or 3rd
		 *   descriptor are for GF-XOR operations;
		 * - the remaining <dst_cnt> descriptors are for checking
		 *   the result
		 */
		if (i++ < src_cnt)
			/* MV_SG1_SG2 if only Q is being verified
			 * MULTICAST if both P and Q are being verified
			 */
			hw_desc->opc = dopc;
		else
			/* DMA_CDB_OPC_DCHECK128 operation */
			hw_desc->opc = DMA_CDB_OPC_DCHECK128;

		if (likely(!list_is_last(&iter->chain_node,
					 &desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
						struct ppc440spe_adma_desc_slot,
						chain_node);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			/* always enable interrupt generation since we get
			 * the status of pqzero from the handler
			 */
			set_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
}
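
/*
 * Resulting CDB chain layout for PQ_ZERO_SUM (summarized from the
 * routine above):
 *
 *	[clone P] ([clone Q]) -> [GF-XOR] x src_cnt -> [DCHECK128] x dst_cnt
 *
 * i.e. one or two clone slots first, then one MV_SG1_SG2/MULTICAST slot
 * per source, then one 128-bit data-check slot per destination.
 */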

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
				       unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_set_src_addr - set source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int src_idx, dma_addr_t addrh,
					dma_addr_t addrl)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmplow, tmphi;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
		dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->ops[src_idx].l = addrl;
		xor_hw_desc->ops[src_idx].h |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
			struct ppc440spe_adma_chan *chan, u32 mult_index,
			int sg_index, unsigned char mult_value)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	u32 *psgu;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (sg_index) {
		/* for RXOR operations set multiplier
		 * into source cued address
		 */
		case DMA_CDB_SG_SRC:
			psgu = &dma_hw_desc->sg1u;
			break;
		/* for WXOR operations set multiplier
		 * into destination cued address(es)
		 */
		case DMA_CDB_SG_DST1:
			psgu = &dma_hw_desc->sg2u;
			break;
		case DMA_CDB_SG_DST2:
			psgu = &dma_hw_desc->sg3u;
			break;
		default:
			BUG();
		}

		*psgu |= cpu_to_le32(mult_value << mult_index);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					 struct ppc440spe_adma_chan *chan,
					 dma_addr_t addrh, dma_addr_t addrl,
					 u32 dst_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmphi, tmplow;
	u32 *psgu, *psgl;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;

		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

		*psgl = cpu_to_le32((u32)tmplow);
		*psgu |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbtal = addrl;
		xor_hw_desc->cbtah |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_byte_count - set number of data bytes involved
 * in the operation
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan,
					  u32 byte_count)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->cnt = cpu_to_le32(byte_count);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbbc = byte_count;
		break;
	}
}

/**
 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
 */
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
	/* assume that byte_count is aligned on the 512-byte boundary;
	 * thus write it directly to the register (bits 23:31 are
	 * reserved there).
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}
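
/*
 * Illustrative sketch (assumed call site, not a fixed contract): since
 * MQ0_CF2H is a single shared DCR, the block size is programmed once per
 * active RXOR transaction, e.g. right after winning the
 * PPC440SPE_RXOR_RUN bit and before the chain is submitted:
 *
 *	ppc440spe_desc_set_rxor_block_size(len);	/" len % 512 == 0 "/
 */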

/**
 * ppc440spe_desc_set_dcheck - set CHECK pattern
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan,
				      u8 *qword)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		iowrite32(qword[0], &dma_hw_desc->sg3l);
		iowrite32(qword[4], &dma_hw_desc->sg3u);
		iowrite32(qword[8], &dma_hw_desc->sg2l);
		iowrite32(qword[12], &dma_hw_desc->sg2u);
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_xor_set_link - set link address in xor CB
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
				   struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

	if (unlikely(!next_desc || !(next_desc->phys))) {
		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
		       __func__, next_desc,
		       next_desc ? next_desc->phys : 0);
		BUG();
	}

	xor_hw_desc->cbs = 0;
	xor_hw_desc->cblal = next_desc->phys;
	xor_hw_desc->cblah = 0;
	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

/**
 * ppc440spe_desc_set_link - set the address of descriptor following this
 * descriptor in chain
 */
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
				    struct ppc440spe_adma_desc_slot *prev_desc,
				    struct ppc440spe_adma_desc_slot *next_desc)
{
	unsigned long flags;
	struct ppc440spe_adma_desc_slot *tail = next_desc;

	if (unlikely(!prev_desc || !next_desc ||
		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
		/* If the previous 'next' is overwritten something is wrong.
		 * Though we may refetch from append to initiate list
		 * processing; in that case it's ok.
		 */
		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
		       "prev->hw_next=0x%p\n", __func__, prev_desc,
		       next_desc, prev_desc ? prev_desc->hw_next : 0);
		BUG();
	}

	local_irq_save(flags);

	/* do s/w chaining both for DMA and XOR descriptors */
	prev_desc->hw_next = next_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		break;
	case PPC440SPE_XOR_ID:
		/* bind descriptor to the chain */
		while (tail->hw_next)
			tail = tail->hw_next;
		xor_last_linked = tail;

		if (prev_desc == xor_last_submit)
			/* do not link to the last submitted CB */
			break;
		ppc440spe_xor_set_link(prev_desc, next_desc);
		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_link - get the address of the descriptor that
 * follows this one
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	if (!desc->hw_next)
		return 0;

	return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check alignment
 */
static inline int ppc440spe_desc_is_aligned(
	struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/**
 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
 * XOR operation
 */
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
					 int *slots_per_op)
{
	int slot_cnt;

	/* each XOR descriptor provides up to 16 source operands */
	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

	if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
		return slot_cnt;

	printk(KERN_ERR "%s: len %zu > max %d !!\n",
	       __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
	BUG();
	return slot_cnt;
}
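
/*
 * Worked example (assuming XOR_MAX_OPS == 16, i.e. each XOR CB carries
 * up to 16 operands): src_cnt = 20 gives slot_cnt = slots_per_op =
 * (20 + 15) / 16 = 2, so the operation is split over two chained CBs.
 */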

/**
 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
 * DMA2 PQ operation
 */
static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
					int src_cnt, size_t len)
{
	signed long long order = 0;
	int state = 0;
	int addr_count = 0;
	int i;
	for (i = 1; i < src_cnt; i++) {
		dma_addr_t cur_addr = srcs[i];
		dma_addr_t old_addr = srcs[i-1];
		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				state = 3;
			}
			break;
		case 1:
			if (i == src_cnt-2 || (order == -1
				&& cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
				addr_count++;
			} else if (cur_addr == old_addr + len*order) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 2*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 3*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				order = 0;
				state = 0;
				addr_count++;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			addr_count++;
			break;
		}
		if (state == 3)
			break;
	}
	if (src_cnt <= 1 || (state != 1 && state != 2)) {
		pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
			__func__, src_cnt, state, addr_count, order);
		for (i = 0; i < src_cnt; i++)
			pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
		BUG();
	}

	return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
}
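
/*
 * Illustrative address patterns (derived from the state machine above):
 * with len = 4096, sources {B, B + 4096, B + 8192} collapse into a single
 * RXOR "region" (one cued address), while an unrelated source breaks the
 * run and counts as a separate address. The returned slot count is the
 * number of such addresses packed 16 (XOR_MAX_OPS) per CB.
 */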


/******************************************************************************
 * ADMA channel low-level routines
 ******************************************************************************/

static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);

/**
 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
 */
static void ppc440spe_adma_device_clear_eot_status(
					struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	u8 *p = chan->device->dma_desc_pool_virt;
	struct dma_cdb *cdb;
	u32 rv, i;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* read FIFO to ack */
		dma_reg = chan->device->dma_reg;
		while ((rv = ioread32(&dma_reg->csfpl))) {
			i = rv & DMA_CDB_ADDR_MSK;
			cdb = (struct dma_cdb *)&p[i -
			    (u32)chan->device->dma_desc_pool];

			/* Clear opcode to ack. This is necessary for
			 * ZeroSum operations only
			 */
			cdb->opc = 0;

			if (test_bit(PPC440SPE_RXOR_RUN,
			    &ppc440spe_rxor_state)) {
				/* probably this is a completed RXOR op,
				 * get pointer to CDB using the fact that
				 * physical and virtual addresses of CDB
				 * in pools have the same offsets
				 */
				if (le32_to_cpu(cdb->sg1u) &
				    DMA_CUED_XOR_BASE) {
					/* this is a RXOR */
					clear_bit(PPC440SPE_RXOR_RUN,
						  &ppc440spe_rxor_state);
				}
			}

			if (rv & DMA_CDB_STATUS_MSK) {
				/* ZeroSum check failed
				 */
				struct ppc440spe_adma_desc_slot *iter;
				dma_addr_t phys = rv & ~DMA_CDB_MSK;

				/*
				 * Update the status of corresponding
				 * descriptor.
				 */
				list_for_each_entry(iter, &chan->chain,
				    chain_node) {
					if (iter->phys == phys)
						break;
				}
				/*
				 * if cannot find the corresponding
				 * slot it's a bug
				 */
				BUG_ON(&iter->chain_node == &chan->chain);

				if (iter->xor_check_result) {
					if (test_bit(PPC440SPE_DESC_PCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_P_RESULT;
					} else
					if (test_bit(PPC440SPE_DESC_QCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_Q_RESULT;
					} else
						BUG();
				}
			}
		}

		rv = ioread32(&dma_reg->dsts);
		if (rv) {
			pr_err("DMA%d err status: 0x%x\n",
			       chan->device->id, rv);
			/* write back to clear */
			iowrite32(rv, &dma_reg->dsts);
		}
		break;
	case PPC440SPE_XOR_ID:
		/* reset status bits to ack */
		xor_reg = chan->device->xor_reg;
		rv = ioread32be(&xor_reg->sr);
		iowrite32be(rv, &xor_reg->sr);

		if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
			if (rv & XOR_IE_RPTIE_BIT) {
				/* Read PLB Timeout Error.
				 * Try to resubmit the CB
				 */
				u32 val = ioread32be(&xor_reg->ccbalr);

				iowrite32be(val, &xor_reg->cblalr);

				val = ioread32be(&xor_reg->crsr);
				iowrite32be(val | XOR_CRSR_XAE_BIT,
					    &xor_reg->crsr);
			} else
				pr_err("XOR ERR 0x%x status\n", rv);
			break;
		}

		/* if the XORcore is idle, but there are unprocessed CBs
		 * then refetch the s/w chain here
		 */
		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
		    do_xor_refetch)
			ppc440spe_chan_append(chan);
		break;
	}
}

/**
 * ppc440spe_chan_is_busy - get the channel status
 */
static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	int busy = 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		/* if command FIFO's head and tail pointers are equal and
		 * status tail is the same as command, then channel is free
		 */
		if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
		    ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
			busy = 1;
		break;
	case PPC440SPE_XOR_ID:
		/* use the special status bit for the XORcore
		 */
		xor_reg = chan->device->xor_reg;
		busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
		break;
	}

	return busy;
}

/**
 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
 */
static void ppc440spe_chan_set_first_xor_descriptor(
				struct ppc440spe_adma_chan *chan,
				struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_regs *xor_reg = chan->device->xor_reg;

	if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
		printk(KERN_INFO "%s: Warn: XORcore is running "
		       "when trying to set the first CDB!\n",
		       __func__);

	xor_last_submit = xor_last_linked = next_desc;

	iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);

	iowrite32be(next_desc->phys, &xor_reg->cblalr);
	iowrite32be(0, &xor_reg->cblahr);
	iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
		    &xor_reg->cbcr);

	chan->hw_chain_inited = 1;
}

/**
 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
 * called with irqs disabled
 */
static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
				   struct ppc440spe_adma_desc_slot *desc)
{
	u32 pcdb;
	struct dma_regs *dma_reg = chan->device->dma_reg;

	pcdb = desc->phys;
	if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
		pcdb |= DMA_CDB_NO_INT;

	chan_last_sub[chan->device->id] = desc;

	ADMA_LL_DBG(print_cb(chan, desc->hw_desc));

	iowrite32(pcdb, &dma_reg->cpfpl);
}

/**
 * ppc440spe_chan_append - update the h/w chain in the channel
 */
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;
	struct ppc440spe_adma_desc_slot *iter;
	struct xor_cb *xcb;
	u32 cur_desc;
	unsigned long flags;

	local_irq_save(flags);

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		cur_desc = ppc440spe_chan_get_current_descriptor(chan);

		if (likely(cur_desc)) {
			iter = chan_last_sub[chan->device->id];
			BUG_ON(!iter);
		} else {
			/* first peer */
			iter = chan_first_cdb[chan->device->id];
			BUG_ON(!iter);
			ppc440spe_dma_put_desc(chan, iter);
			chan->hw_chain_inited = 1;
		}

		/* is there something new to append */
		if (!iter->hw_next)
			break;

		/* flush descriptors from the s/w queue to fifo */
		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
			ppc440spe_dma_put_desc(chan, iter);
			if (!iter->hw_next)
				break;
		}
		break;
	case PPC440SPE_XOR_ID:
		/* update h/w links and refetch */
		if (!xor_last_submit->hw_next)
			break;

		xor_reg = chan->device->xor_reg;
		/* the last linked CDB has to generate an interrupt so that
		 * we are able to append the next lists to h/w regardless
		 * of the XOR engine state at the moment these next lists
		 * are appended
		 */
		xcb = xor_last_linked->hw_desc;
		xcb->cbc |= XOR_CBCR_CBCE_BIT;

		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
			/* XORcore is idle. Refetch now */
			do_xor_refetch = 0;
			ppc440spe_xor_set_link(xor_last_submit,
				xor_last_submit->hw_next);

			ADMA_LL_DBG(print_cb_list(chan,
				xor_last_submit->hw_next));

			xor_last_submit = xor_last_linked;
			iowrite32be(ioread32be(&xor_reg->crsr) |
				    XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
				    &xor_reg->crsr);
		} else {
			/* XORcore is running. Refetch later in the handler */
			do_xor_refetch = 1;
		}

		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
 */
static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;

	if (unlikely(!chan->hw_chain_inited))
		/* h/w descriptor chain is not initialized yet */
		return 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
	case PPC440SPE_XOR_ID:
		xor_reg = chan->device->xor_reg;
		return ioread32be(&xor_reg->ccbalr);
	}
	return 0;
}

/**
 * ppc440spe_chan_run - enable the channel
 */
static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* DMAs are always enabled, do nothing */
		break;
	case PPC440SPE_XOR_ID:
		/* drain write buffer */
		xor_reg = chan->device->xor_reg;

		/* fetch descriptor pointed to in <link> */
		iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
			    &xor_reg->crsr);
		break;
	}
}

/******************************************************************************
 * ADMA device level
 ******************************************************************************/

static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);

static dma_cookie_t
ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
				    dma_addr_t addr, int index);
static void
ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
				  dma_addr_t addr, int index);

static void
ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
			   dma_addr_t *paddr, unsigned long flags);
static void
ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
			  dma_addr_t addr, int index);
static void
ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
			       unsigned char mult, int index, int dst_pos);
static void
ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
				   dma_addr_t paddr, dma_addr_t qaddr);

static struct page *ppc440spe_rxor_srcs[32];

/**
 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
 */
static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
{
	int i, order = 0, state = 0;
	int idx = 0;

	if (unlikely(!(src_cnt > 1)))
		return 0;

	BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));

	/* Skip holes in the source list before checking */
	for (i = 0; i < src_cnt; i++) {
		if (!srcs[i])
			continue;
		ppc440spe_rxor_srcs[idx++] = srcs[i];
	}
	src_cnt = idx;

	for (i = 1; i < src_cnt; i++) {
		char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
		char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);

		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
			} else
				goto out;
			break;
		case 1:
			if ((i == src_cnt - 2) ||
			    (order == -1 && cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
			} else if ((cur_addr == old_addr + len * order) ||
				   (cur_addr == old_addr + 2 * len) ||
				   (cur_addr == old_addr + 3 * len)) {
				state = 2;
			} else {
				order = 0;
				state = 0;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			break;
		}
	}

out:
	if (state == 1 || state == 2)
		return 1;

	return 0;
}
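
/*
 * Illustrative example (a sketch, assuming page-backed operands): three
 * sources whose page addresses run contiguously, A, A + len, A + 2*len,
 * finish the scan in state 2 and are thus RXOR-capable; swapping in an
 * unrelated page breaks the run in state 0 and the function returns 0.
 */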

/**
 * ppc440spe_adma_estimate - estimate the efficiency of processing
 * the operation given on this channel. It's assumed that 'chan' is
 * capable of processing the 'cap' type of operation.
 * @chan: channel to use
 * @cap: type of transaction
 * @dst_lst: array of destination pointers
 * @dst_cnt: number of destination operands
 * @src_lst: array of source pointers
 * @src_cnt: number of source operands
 * @src_sz: size of each source operand
 */
static int ppc440spe_adma_estimate(struct dma_chan *chan,
	enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
	struct page **src_lst, int src_cnt, size_t src_sz)
{
	int ef = 1;

	if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
		/* If RAID-6 capabilities were not activated don't try
		 * to use them
		 */
		if (unlikely(!ppc440spe_r6_enabled))
			return -1;
	}
	/* In the current implementation of the ppc440spe ADMA driver it
	 * makes sense to pick out only the pq case, because it may be
	 * processed:
	 * (1) either using the Biskup method on DMA2;
	 * (2) or on DMA0/1.
	 * Thus we favour (1) if the sources are suitable;
	 * else let it be processed on one of the DMA0/1 engines.
	 * In the sum_product case where the destination is also a
	 * source, process it on DMA0/1 only.
	 */
	if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {

		if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
			ef = 0; /* sum_product case, process on DMA0/1 */
		else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
			ef = 3; /* override (DMA0/1 + idle) */
		else
			ef = 0; /* can't process on DMA2 if !rxor */
	}

	/* channel idleness increases the priority */
	if (likely(ef) &&
	    !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
		ef++;

	return ef;
}
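
/*
 * Resulting ranking (read off the code above): -1 disqualifies a channel
 * (PQ requested but RAID-6 not enabled), 0 disqualifies DMA2 for
 * non-RXOR-able PQ, a busy-but-capable engine scores 1, an idle one 2,
 * and an idle DMA2 with RXOR-suitable sources tops out at 4 (3 + 1).
 */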

struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
	struct page **dst_lst, int dst_cnt, struct page **src_lst,
	int src_cnt, size_t src_sz)
{
	struct dma_chan *best_chan = NULL;
	struct ppc_dma_chan_ref *ref;
	int best_rank = -1;

	if (unlikely(!src_sz))
		return NULL;
	if (src_sz > PAGE_SIZE) {
		/*
		 * should a user of the api ever pass > PAGE_SIZE requests
		 * we sort out cases where temporary page-sized buffers
		 * are used.
		 */
		switch (cap) {
		case DMA_PQ:
			if (src_cnt == 1 && dst_lst[1] == src_lst[0])
				return NULL;
			if (src_cnt == 2 && dst_lst[1] == src_lst[1])
				return NULL;
			break;
		case DMA_PQ_VAL:
		case DMA_XOR_VAL:
			return NULL;
		default:
			break;
		}
	}

	list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			int rank;

			rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
					dst_cnt, src_lst, src_cnt, src_sz);
			if (rank > best_rank) {
				best_rank = rank;
				best_chan = ref->chan;
			}
		}
	}

	return best_chan;
}
EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
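
/*
 * Illustrative usage sketch (assumed caller, e.g. a RAID stack built on
 * async_tx; dst[] and src[] are pages the caller already owns):
 *
 *	struct dma_chan *chan;
 *
 *	chan = ppc440spe_async_tx_find_best_channel(DMA_PQ, dst, 2,
 *						     src, 4, PAGE_SIZE);
 *	if (!chan)
 *		... fall back to a synchronous P/Q computation ...
 *
 * The highest-ranked capable channel wins; see the scoring notes after
 * ppc440spe_adma_estimate() above.
 */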

/**
 * ppc440spe_get_group_entry - get group entry with index entry_idx
 * @tdesc: the last allocated slot in the group.
 */
static struct ppc440spe_adma_desc_slot *
ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
{
	struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
	int i = 0;

	if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
		printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
		       __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
		BUG();
	}

	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
		if (i++ == entry_idx)
			break;
	}
	return iter;
}

/**
 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &ppc440spe_chan->lock while calling this function
 */
static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
				      struct ppc440spe_adma_chan *chan)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct ppc440spe_adma_desc_slot,
				slot_node);
	}
}

/**
 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
 * upon completion
 */
static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_adma_chan *chan,
		dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

/**
 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
 */
static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
				     struct ppc440spe_adma_chan *chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (list_is_last(&desc->chain_node, &chan->chain) ||
	    desc->phys == ppc440spe_chan_get_current_descriptor(chan))
		return 1;

	if (chan->device->id != PPC440SPE_XOR_ID) {
		/* our DMA interrupt handler clears opc field of
		 * each processed descriptor. For all types of
		 * operations except for ZeroSum we do not actually
		 * need ack from the interrupt handler. ZeroSum is a
		 * special case since the result of this operation
		 * is available from the handler only, so if we see
		 * such type of descriptor (which is unprocessed yet)
		 * then leave it in chain.
		 */
		struct dma_cdb *cdb = desc->hw_desc;
		if (cdb->opc == DMA_CDB_OPC_DCHECK128)
			return 1;
	}

	dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
		desc->phys, desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	ppc440spe_adma_free_slots(desc, chan);
	return 0;
}

/**
 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine
 * which runs through the channel CDBs list until it reaches the descriptor
 * currently being processed. When the routine determines that all CDBs of
 * a group are completed, the corresponding callbacks (if any) are called
 * and the slots are freed.
 */
static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
	struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
	int busy = ppc440spe_chan_is_busy(chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
		chan->device->id, __func__);

	if (!current_desc) {
		/* There were no transactions yet, so
		 * nothing to clean
		 */
		return;
	}

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &chan->chain,
				 chain_node) {
		dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
			"busy: %d this_desc: %#llx next_desc: %#x "
			"cur: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy, iter->phys,
			ppc440spe_desc_get_link(iter, chan), current_desc,
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel; subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || ppc440spe_desc_get_link(iter, chan)) {
				/* not all descriptors of the group have
				 * been completed; exit.
				 */
				break;
			}
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			if (!group_start)
				group_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;

			/* clean up the group */
			slot_cnt = group_start->slot_cnt;
			grp_iter = group_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&chan->chain, chain_node) {

				cookie = ppc440spe_adma_run_tx_complete_actions(
					grp_iter, chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = ppc440spe_adma_clean_slot(
				    grp_iter, chan);
				if (end_of_chain && slot_cnt) {
					/* Should wait for ZeroSum completion */
					if (cookie > 0)
						chan->common.completed_cookie =
							cookie;
					return;
				}

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			group_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
		    cookie);

		if (ppc440spe_adma_clean_slot(iter, chan))
			break;
	}

	BUG_ON(!seen_current);

	if (cookie > 0) {
		chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

/**
 * ppc440spe_adma_tasklet - clean up watch-dog initiator
 */
static void ppc440spe_adma_tasklet(unsigned long data)
{
	struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;

	spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
	__ppc440spe_adma_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

/**
 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
 */
static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
	spin_lock_bh(&chan->lock);
	__ppc440spe_adma_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

/**
 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
 */
static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
		struct ppc440spe_adma_chan *chan, int num_slots,
		int slots_per_op)
{
	struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
	struct ppc440spe_adma_desc_slot *alloc_start = NULL;
	struct list_head chain = LIST_HEAD_INIT(chain);
	int slots_found, retry = 0;


	BUG_ON(!num_slots || !slots_per_op);
	/* start search from the last allocated descriptor;
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = chan->last_used;
	else
		iter = list_entry(&chan->all_slots,
				  struct ppc440spe_adma_desc_slot,
				  slot_node);
	list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
	    slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
			struct ppc440spe_adma_desc_slot *last_used = NULL;

			iter = alloc_start;
			while (num_slots) {
				int i;
				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->hw_next = NULL;
				iter->flags = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct ppc440spe_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->group_list);
			chan->last_used = last_used;
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&chan->irq_tasklet);
	return NULL;
}

/**
 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
 */
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ppc440spe_adma_chan *ppc440spe_chan;
	struct ppc440spe_adma_desc_slot *slot = NULL;
	char *hw_desc;
	int i, db_sz;
	int init;

	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
	init = ppc440spe_chan->slots_allocated ? 0 : 1;
	chan->chan_id = ppc440spe_chan->device->id;

	/* Allocate descriptor slots */
	i = ppc440spe_chan->slots_allocated;
	if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
		db_sz = sizeof(struct dma_cdb);
	else
		db_sz = sizeof(struct xor_cb);

	for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
		slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
			       GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "SPE ADMA Channel only initialized"
				" %d descriptor slots", i--);
			break;
		}

		hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[i * db_sz];
		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->group_list);
		slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
		slot->idx = i;

		spin_lock_bh(&ppc440spe_chan->lock);
		ppc440spe_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
		spin_unlock_bh(&ppc440spe_chan->lock);
	}

	if (i && !ppc440spe_chan->last_used) {
		ppc440spe_chan->last_used =
			list_entry(ppc440spe_chan->all_slots.next,
				struct ppc440spe_adma_desc_slot,
				slot_node);
	}

	dev_dbg(ppc440spe_chan->device->common.dev,
		"ppc440spe adma%d: allocated %d descriptor slots\n",
		ppc440spe_chan->device->id, i);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		switch (ppc440spe_chan->device->id) {
		case PPC440SPE_DMA0_ID:
		case PPC440SPE_DMA1_ID:
			ppc440spe_chan->hw_chain_inited = 0;
			/* Use WXOR for self-testing */
			if (!ppc440spe_r6_tchan)
				ppc440spe_r6_tchan = ppc440spe_chan;
			break;
		case PPC440SPE_XOR_ID:
			ppc440spe_chan_start_null_xor(ppc440spe_chan);
			break;
		default:
			BUG();
		}
		ppc440spe_chan->needs_unmap = 1;
	}

	return (i > 0) ? i : -ENOMEM;
}

/**
 * ppc440spe_rxor_set_region - set the RXOR region mask in the xor CB
 */
static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
				      u8 xor_arg_no, u32 mask)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= mask;
}

/**
 * ppc440spe_rxor_set_src - set the RXOR source address in the xor CB
 */
static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
				   u8 xor_arg_no, dma_addr_t addr)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
	xcb->ops[xor_arg_no].l = addr;
}

/**
 * ppc440spe_rxor_set_mult - set the RXOR multiplier in the xor CB
 */
static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
				    u8 xor_arg_no, u8 idx, u8 mult)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
}

/**
 * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
 * has been reached
 */
static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
{
	dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
		chan->device->id, chan->pending);

	if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
		chan->pending = 0;
		ppc440spe_chan_append(chan);
	}
}
1906
1907/**
1908 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
1909 * (it's not necessary that descriptors will be submitted to the h/w
1910 * chains too right now)
1911 */
1912static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
1913{
1914 struct ppc440spe_adma_desc_slot *sw_desc;
1915 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
1916 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
1917 int slot_cnt;
1918 int slots_per_op;
1919 dma_cookie_t cookie;
1920
1921 sw_desc = tx_to_ppc440spe_adma_slot(tx);
1922
1923 group_start = sw_desc->group_head;
1924 slot_cnt = group_start->slot_cnt;
1925 slots_per_op = group_start->slots_per_op;
1926
1927 spin_lock_bh(&chan->lock);
884485e1 1928 cookie = dma_cookie_assign(tx);
12458ea0
AG
1929
1930 if (unlikely(list_empty(&chan->chain))) {
1931 /* first peer */
1932 list_splice_init(&sw_desc->group_list, &chan->chain);
1933 chan_first_cdb[chan->device->id] = group_start;
1934 } else {
1935 /* isn't first peer, bind CDBs to chain */
1936 old_chain_tail = list_entry(chan->chain.prev,
1937 struct ppc440spe_adma_desc_slot,
1938 chain_node);
1939 list_splice_init(&sw_desc->group_list,
1940 &old_chain_tail->chain_node);
1941 /* fix up the hardware chain */
1942 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
1943 }
1944
1945 /* increment the pending count by the number of operations */
1946 chan->pending += slot_cnt / slots_per_op;
1947 ppc440spe_adma_check_threshold(chan);
1948 spin_unlock_bh(&chan->lock);
1949
1950 dev_dbg(chan->device->common.dev,
1951 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
1952 chan->device->id, __func__,
1953 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
1954
1955 return cookie;
1956}
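
/*
 * Client-side sketch of how this path is typically driven through the
 * generic dmaengine API (the variable names are illustrative
 * assumptions, not part of this driver):
 *
 *	dma_cookie_t cookie = dmaengine_submit(tx);	/- calls tx_submit()
 *	dma_async_issue_pending(chan);			/- flushes 'pending'
 *	enum dma_status st = dma_sync_wait(chan, cookie);
 */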
1957
1958/**
1959 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
1960 */
1961static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
1962 struct dma_chan *chan, unsigned long flags)
1963{
1964 struct ppc440spe_adma_chan *ppc440spe_chan;
1965 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
1966 int slot_cnt, slots_per_op;
1967
1968 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
1969
1970 dev_dbg(ppc440spe_chan->device->common.dev,
1971 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
1972 __func__);
1973
1974 spin_lock_bh(&ppc440spe_chan->lock);
1975 slot_cnt = slots_per_op = 1;
1976 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
1977 slots_per_op);
1978 if (sw_desc) {
1979 group_start = sw_desc->group_head;
1980 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
1981 group_start->unmap_len = 0;
1982 sw_desc->async_tx.flags = flags;
1983 }
1984 spin_unlock_bh(&ppc440spe_chan->lock);
1985
1986 return sw_desc ? &sw_desc->async_tx : NULL;
1987}
1988
1989/**
1990 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
1991 */
1992static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
1993 struct dma_chan *chan, dma_addr_t dma_dest,
1994 dma_addr_t dma_src, size_t len, unsigned long flags)
1995{
1996 struct ppc440spe_adma_chan *ppc440spe_chan;
1997 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
1998 int slot_cnt, slots_per_op;
1999
2000 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2001
2002 if (unlikely(!len))
2003 return NULL;
2004
2005	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2006
2007 spin_lock_bh(&ppc440spe_chan->lock);
2008
2009 dev_dbg(ppc440spe_chan->device->common.dev,
2010		"ppc440spe adma%d: %s len: %zu int_en %d\n",
2011 ppc440spe_chan->device->id, __func__, len,
2012 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2013 slot_cnt = slots_per_op = 1;
2014 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2015 slots_per_op);
2016 if (sw_desc) {
2017 group_start = sw_desc->group_head;
2018 ppc440spe_desc_init_memcpy(group_start, flags);
2019 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2020 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2021 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2022 sw_desc->unmap_len = len;
2023 sw_desc->async_tx.flags = flags;
2024 }
2025 spin_unlock_bh(&ppc440spe_chan->lock);
2026
2027 return sw_desc ? &sw_desc->async_tx : NULL;
2028}
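
/*
 * Usage sketch, assuming 'chan' was granted with the DMA_MEMCPY
 * capability and dma_dest/dma_src are already DMA-mapped (the names
 * are illustrative):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dma_dest, dma_src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx)
 *		dmaengine_submit(tx);
 */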
2029
2030/**
2031 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2032 */
2033static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2034 struct dma_chan *chan, dma_addr_t dma_dest,
2035 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2036 unsigned long flags)
2037{
2038 struct ppc440spe_adma_chan *ppc440spe_chan;
2039 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2040 int slot_cnt, slots_per_op;
2041
2042 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2043
2044 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2045 dma_dest, dma_src, src_cnt));
2046 if (unlikely(!len))
2047 return NULL;
2048	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2049
2050 dev_dbg(ppc440spe_chan->device->common.dev,
2051		"ppc440spe adma%d: %s src_cnt: %d len: %zu int_en: %d\n",
2052 ppc440spe_chan->device->id, __func__, src_cnt, len,
2053 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2054
2055 spin_lock_bh(&ppc440spe_chan->lock);
2056 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2057 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2058 slots_per_op);
2059 if (sw_desc) {
2060 group_start = sw_desc->group_head;
2061 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2062 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2063 while (src_cnt--)
2064 ppc440spe_adma_memcpy_xor_set_src(group_start,
2065 dma_src[src_cnt], src_cnt);
2066 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2067 sw_desc->unmap_len = len;
2068 sw_desc->async_tx.flags = flags;
2069 }
2070 spin_unlock_bh(&ppc440spe_chan->lock);
2071
2072 return sw_desc ? &sw_desc->async_tx : NULL;
2073}
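
/*
 * XOR requests usually arrive here via the async_tx layer rather than
 * a direct call; a sketch under that assumption (the pages, callback
 * and scribble buffer are illustrative):
 *
 *	struct async_submit_ctl submit;
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  cb, cb_arg, addr_conv);
 *	tx = async_xor(dest_page, src_pages, 0, src_cnt, len, &submit);
 */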
2074
2075static inline void
2076ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2077 int src_cnt);
2078static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2079
2080/**
2081 * ppc440spe_adma_init_dma2rxor_slot - initialize the CDB(s) of a DMA2 RXOR slot
2082 */
2083static void ppc440spe_adma_init_dma2rxor_slot(
2084 struct ppc440spe_adma_desc_slot *desc,
2085 dma_addr_t *src, int src_cnt)
2086{
2087 int i;
2088
2089 /* initialize CDB */
2090 for (i = 0; i < src_cnt; i++) {
2091 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2092 desc->src_cnt, (u32)src[i]);
2093 }
2094}
2095
2096/**
2097 * ppc440spe_dma01_prep_mult - prepare CDBs
2098 * for a Q operation where the destination is also the source
2099 */
2100static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2101 struct ppc440spe_adma_chan *ppc440spe_chan,
2102 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2103 const unsigned char *scf, size_t len, unsigned long flags)
2104{
2105 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2106 unsigned long op = 0;
2107 int slot_cnt;
2108
2109 set_bit(PPC440SPE_DESC_WXOR, &op);
2110 slot_cnt = 2;
2111
2112 spin_lock_bh(&ppc440spe_chan->lock);
2113
2114 /* use WXOR, each descriptor occupies one slot */
2115 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2116 if (sw_desc) {
2117 struct ppc440spe_adma_chan *chan;
2118 struct ppc440spe_adma_desc_slot *iter;
2119 struct dma_cdb *hw_desc;
2120
2121 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2122 set_bits(op, &sw_desc->flags);
2123 sw_desc->src_cnt = src_cnt;
2124 sw_desc->dst_cnt = dst_cnt;
2125 /* First descriptor, zero data in the destination and copy it
2126 * to q page using MULTICAST transfer.
2127 */
2128 iter = list_first_entry(&sw_desc->group_list,
2129 struct ppc440spe_adma_desc_slot,
2130 chain_node);
2131 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2132 /* set 'next' pointer */
2133 iter->hw_next = list_entry(iter->chain_node.next,
2134 struct ppc440spe_adma_desc_slot,
2135 chain_node);
2136 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2137 hw_desc = iter->hw_desc;
2138 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2139
2140 ppc440spe_desc_set_dest_addr(iter, chan,
2141 DMA_CUED_XOR_BASE, dst[0], 0);
2142 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2143 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2144 src[0]);
2145 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2146 iter->unmap_len = len;
2147
2148 /*
2149 * Second descriptor, multiply data from the q page
2150 * and store the result in real destination.
2151 */
2152		iter = list_entry(iter->chain_node.next,
2153 struct ppc440spe_adma_desc_slot,
2154 chain_node);
2155 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2156 iter->hw_next = NULL;
2157 if (flags & DMA_PREP_INTERRUPT)
2158 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2159 else
2160 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2161
2162 hw_desc = iter->hw_desc;
2163 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2164 ppc440spe_desc_set_src_addr(iter, chan, 0,
2165 DMA_CUED_XOR_HB, dst[1]);
2166 ppc440spe_desc_set_dest_addr(iter, chan,
2167 DMA_CUED_XOR_BASE, dst[0], 0);
2168
2169 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2170 DMA_CDB_SG_DST1, scf[0]);
2171 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2172 iter->unmap_len = len;
2173 sw_desc->async_tx.flags = flags;
2174 }
2175
2176 spin_unlock_bh(&ppc440spe_chan->lock);
2177
2178 return sw_desc;
2179}
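
/*
 * Worked example of the two-CDB chain above (the coefficient is an
 * assumed value): for Q = scf[0] . src with scf[0] = {03}, the
 * MULTICAST CDB copies src into both dst[0] and the spare q page
 * dst[1]; the MV_SG1_SG2 CDB then reads the q page back with GF(2^8)
 * multiplier {03} and stores the product over dst[0], leaving
 * dst[0] = {03} . src.
 */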
2180
2181/**
2182 * ppc440spe_dma01_prep_sum_product -
2183 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also
2184 * the source.
2185 */
2186static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2187 struct ppc440spe_adma_chan *ppc440spe_chan,
2188 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2189 const unsigned char *scf, size_t len, unsigned long flags)
2190{
2191 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2192 unsigned long op = 0;
2193 int slot_cnt;
2194
2195 set_bit(PPC440SPE_DESC_WXOR, &op);
2196 slot_cnt = 3;
2197
2198 spin_lock_bh(&ppc440spe_chan->lock);
2199
2200 /* WXOR, each descriptor occupies one slot */
2201 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2202 if (sw_desc) {
2203 struct ppc440spe_adma_chan *chan;
2204 struct ppc440spe_adma_desc_slot *iter;
2205 struct dma_cdb *hw_desc;
2206
2207 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2208 set_bits(op, &sw_desc->flags);
2209 sw_desc->src_cnt = src_cnt;
2210 sw_desc->dst_cnt = 1;
2211 /* 1st descriptor, src[1] data to q page and zero destination */
2212 iter = list_first_entry(&sw_desc->group_list,
2213 struct ppc440spe_adma_desc_slot,
2214 chain_node);
2215 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2216 iter->hw_next = list_entry(iter->chain_node.next,
2217 struct ppc440spe_adma_desc_slot,
2218 chain_node);
2219 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2220 hw_desc = iter->hw_desc;
2221 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2222
2223 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2224 *dst, 0);
2225 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2226 ppc440spe_chan->qdest, 1);
2227 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2228 src[1]);
2229 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2230 iter->unmap_len = len;
2231
2232 /* 2nd descriptor, multiply src[1] data and store the
2233 * result in destination */
2234		iter = list_entry(iter->chain_node.next,
2235 struct ppc440spe_adma_desc_slot,
2236 chain_node);
2237 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2238 /* set 'next' pointer */
2239 iter->hw_next = list_entry(iter->chain_node.next,
2240 struct ppc440spe_adma_desc_slot,
2241 chain_node);
2242 if (flags & DMA_PREP_INTERRUPT)
2243 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2244 else
2245 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2246
2247 hw_desc = iter->hw_desc;
2248 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2249 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2250 ppc440spe_chan->qdest);
2251 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2252 *dst, 0);
2253 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2254 DMA_CDB_SG_DST1, scf[1]);
2255 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2256 iter->unmap_len = len;
2257
2258 /*
2259 * 3rd descriptor, multiply src[0] data and xor it
2260 * with destination
2261 */
2262		iter = list_entry(iter->chain_node.next,
2263 struct ppc440spe_adma_desc_slot,
2264 chain_node);
2265 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2266 iter->hw_next = NULL;
2267 if (flags & DMA_PREP_INTERRUPT)
2268 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2269 else
2270 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2271
2272 hw_desc = iter->hw_desc;
2273 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2274 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2275 src[0]);
2276 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2277 *dst, 0);
2278 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2279 DMA_CDB_SG_DST1, scf[0]);
2280 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2281 iter->unmap_len = len;
2282 sw_desc->async_tx.flags = flags;
2283 }
2284
2285 spin_unlock_bh(&ppc440spe_chan->lock);
2286
2287 return sw_desc;
2288}
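
/*
 * An informal restatement of the three-CDB chain above, with
 * A = scf[0] and B = scf[1]:
 *	1) MULTICAST:  copy src[1] to the spare q page and prime *dst;
 *	2) MV_SG1_SG2: multiply the q page by B and combine into *dst;
 *	3) MV_SG1_SG2: multiply src[0] by A and XOR into *dst;
 * which realizes Dx = A*(P + Pxy) + B*(Q + Qxy) for the two-failure
 * RAID-6 recovery case.
 */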
2289
2290static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2291 struct ppc440spe_adma_chan *ppc440spe_chan,
2292 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2293 const unsigned char *scf, size_t len, unsigned long flags)
2294{
2295 int slot_cnt;
2296 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2297 unsigned long op = 0;
2298 unsigned char mult = 1;
2299
2300	pr_debug("%s: dst_cnt %d, src_cnt %d, len %zu\n",
2301 __func__, dst_cnt, src_cnt, len);
2302 /* select operations WXOR/RXOR depending on the
2303	 * source addresses of the operators and the number
2304	 * of destinations (RXOR supports only Q-parity calculations)
2305 */
2306 set_bit(PPC440SPE_DESC_WXOR, &op);
2307 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2308 /* no active RXOR;
2309 * do RXOR if:
2310		 * - there is more than one source,
2311		 * - len is aligned on a 512-byte boundary,
2312		 * - the source addresses fit one of the 4 possible regions.
2313 */
2314 if (src_cnt > 1 &&
2315 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2316 (src[0] + len) == src[1]) {
2317 /* may do RXOR R1 R2 */
2318 set_bit(PPC440SPE_DESC_RXOR, &op);
2319 if (src_cnt != 2) {
2320 /* may try to enhance region of RXOR */
2321 if ((src[1] + len) == src[2]) {
2322 /* do RXOR R1 R2 R3 */
2323 set_bit(PPC440SPE_DESC_RXOR123,
2324 &op);
2325 } else if ((src[1] + len * 2) == src[2]) {
2326 /* do RXOR R1 R2 R4 */
2327 set_bit(PPC440SPE_DESC_RXOR124, &op);
2328 } else if ((src[1] + len * 3) == src[2]) {
2329 /* do RXOR R1 R2 R5 */
2330 set_bit(PPC440SPE_DESC_RXOR125,
2331 &op);
2332 } else {
2333 /* do RXOR R1 R2 */
2334 set_bit(PPC440SPE_DESC_RXOR12,
2335 &op);
2336 }
2337 } else {
2338 /* do RXOR R1 R2 */
2339 set_bit(PPC440SPE_DESC_RXOR12, &op);
2340 }
2341 }
2342
2343 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2344			/* cannot do this operation with RXOR */
2345 clear_bit(PPC440SPE_RXOR_RUN,
2346 &ppc440spe_rxor_state);
2347 } else {
2348 /* can do; set block size right now */
2349 ppc440spe_desc_set_rxor_block_size(len);
2350 }
2351 }
2352
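	/*
	 * A worked example of the region test above (addresses assumed,
	 * len = 512 = 0x200): src[0] = 0x1000 and src[1] = 0x1200 enable
	 * RXOR R1 R2; then src[2] = 0x1400 extends it to R1 R2 R3
	 * (RXOR123), src[2] = 0x1600 to R1 R2 R4 (RXOR124) and
	 * src[2] = 0x1800 to R1 R2 R5 (RXOR125); any other src[2] leaves
	 * plain RXOR12 and the remaining sources are handled with WXOR.
	 */
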
2353 /* Number of necessary slots depends on operation type selected */
2354 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2355 /* This is a WXOR only chain. Need descriptors for each
2356 * source to GF-XOR them with WXOR, and need descriptors
2357 * for each destination to zero them with WXOR
2358 */
2359 slot_cnt = src_cnt;
2360
2361 if (flags & DMA_PREP_ZERO_P) {
2362 slot_cnt++;
2363 set_bit(PPC440SPE_ZERO_P, &op);
2364 }
2365 if (flags & DMA_PREP_ZERO_Q) {
2366 slot_cnt++;
2367 set_bit(PPC440SPE_ZERO_Q, &op);
2368 }
2369 } else {
2370		/* Need 1 or 2 descriptors for the RXOR operation,
2371		 * and (src_cnt - (2 or 3)) descriptors for WXOR
2372		 * of the remaining sources (if any)
2373 */
2374 slot_cnt = dst_cnt;
2375
2376 if (flags & DMA_PREP_ZERO_P)
2377 set_bit(PPC440SPE_ZERO_P, &op);
2378 if (flags & DMA_PREP_ZERO_Q)
2379 set_bit(PPC440SPE_ZERO_Q, &op);
2380
2381 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2382 slot_cnt += src_cnt - 2;
2383 else
2384 slot_cnt += src_cnt - 3;
2385
2386		/* Thus we have either an RXOR-only chain or
2387		 * a mixed RXOR/WXOR one
2388 */
2389 if (slot_cnt == dst_cnt)
2390 /* RXOR only chain */
2391 clear_bit(PPC440SPE_DESC_WXOR, &op);
2392 }
2393
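	/*
	 * Slot-count examples for the rules above (counts illustrative):
	 * a WXOR-only chain with src_cnt = 4 and both P and Q zeroed
	 * takes 4 + 2 = 6 slots, while an RXOR12 chain with src_cnt = 4
	 * and dst_cnt = 2 takes 2 + (4 - 2) = 4 slots; since 4 != dst_cnt
	 * the latter is a mixed RXOR/WXOR chain.
	 */
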
2394 spin_lock_bh(&ppc440spe_chan->lock);
2395 /* for both RXOR/WXOR each descriptor occupies one slot */
2396 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2397 if (sw_desc) {
2398 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2399 flags, op);
2400
2401 /* setup dst/src/mult */
2402 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2403 __func__, dst[0], dst[1]);
2404 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2405 while (src_cnt--) {
2406 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2407 src_cnt);
2408
2409			/* NOTE: "Multi = 0 is equivalent to = 1" as
2410			 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf
2411			 * doesn't work for RXOR with DMA0/1! Instead, multi=0
2412			 * leads to zeroing source data after RXOR.
2413			 * So, for the P case, set mult=1 explicitly.
2414 */
2415 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2416 mult = scf[src_cnt];
2417 ppc440spe_adma_pq_set_src_mult(sw_desc,
2418 mult, src_cnt, dst_cnt - 1);
2419 }
2420
2421		/* Setup the byte count for each slot just allocated */
2422 sw_desc->async_tx.flags = flags;
2423 list_for_each_entry(iter, &sw_desc->group_list,
2424 chain_node) {
2425 ppc440spe_desc_set_byte_count(iter,
2426 ppc440spe_chan, len);
2427 iter->unmap_len = len;
2428 }
2429 }
2430 spin_unlock_bh(&ppc440spe_chan->lock);
2431
2432 return sw_desc;
2433}
2434
2435static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2436 struct ppc440spe_adma_chan *ppc440spe_chan,
2437 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2438 const unsigned char *scf, size_t len, unsigned long flags)
2439{
2440 int slot_cnt, descs_per_op;
2441 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2442 unsigned long op = 0;
2443 unsigned char mult = 1;
2444
2445 BUG_ON(!dst_cnt);
2446 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2447 __func__, dst_cnt, src_cnt, len);*/
2448
2449 spin_lock_bh(&ppc440spe_chan->lock);
2450 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2451 if (descs_per_op < 0) {
2452 spin_unlock_bh(&ppc440spe_chan->lock);
2453 return NULL;
2454 }
2455
2456 /* depending on number of sources we have 1 or 2 RXOR chains */
2457 slot_cnt = descs_per_op * dst_cnt;
2458
2459 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2460 if (sw_desc) {
2461 op = slot_cnt;
2462 sw_desc->async_tx.flags = flags;
2463 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2464 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2465 --op ? 0 : flags);
2466 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2467 len);
2468 iter->unmap_len = len;
2469
2470 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2471 iter->rxor_cursor.len = len;
2472 iter->descs_per_op = descs_per_op;
2473 }
2474 op = 0;
2475 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2476 op++;
2477 if (op % descs_per_op == 0)
2478 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2479 src_cnt);
2480 if (likely(!list_is_last(&iter->chain_node,
2481 &sw_desc->group_list))) {
2482 /* set 'next' pointer */
2483 iter->hw_next =
2484 list_entry(iter->chain_node.next,
2485 struct ppc440spe_adma_desc_slot,
2486 chain_node);
2487 ppc440spe_xor_set_link(iter, iter->hw_next);
2488 } else {
2489 /* this is the last descriptor. */
2490 iter->hw_next = NULL;
2491 }
2492 }
2493
2494 /* fixup head descriptor */
2495 sw_desc->dst_cnt = dst_cnt;
2496 if (flags & DMA_PREP_ZERO_P)
2497 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2498 if (flags & DMA_PREP_ZERO_Q)
2499 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2500
2501 /* setup dst/src/mult */
2502 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2503
2504 while (src_cnt--) {
2505 /* handle descriptors (if dst_cnt == 2) inside
2506 * the ppc440spe_adma_pq_set_srcxxx() functions
2507 */
2508 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2509 src_cnt);
2510 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2511 mult = scf[src_cnt];
2512 ppc440spe_adma_pq_set_src_mult(sw_desc,
2513 mult, src_cnt, dst_cnt - 1);
2514 }
2515 }
2516 spin_unlock_bh(&ppc440spe_chan->lock);
2517 ppc440spe_desc_set_rxor_block_size(len);
2518 return sw_desc;
2519}
2520
2521/**
2522 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2523 */
2524static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2525 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2526 unsigned int src_cnt, const unsigned char *scf,
2527 size_t len, unsigned long flags)
2528{
2529 struct ppc440spe_adma_chan *ppc440spe_chan;
2530 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2531 int dst_cnt = 0;
2532
2533 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2534
2535 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2536 dst, src, src_cnt));
2537 BUG_ON(!len);
2538	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2539 BUG_ON(!src_cnt);
2540
2541 if (src_cnt == 1 && dst[1] == src[0]) {
2542 dma_addr_t dest[2];
2543
2544 /* dst[1] is real destination (Q) */
2545 dest[0] = dst[1];
2546 /* this is the page to multicast source data to */
2547 dest[1] = ppc440spe_chan->qdest;
2548 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2549 dest, 2, src, src_cnt, scf, len, flags);
2550 return sw_desc ? &sw_desc->async_tx : NULL;
2551 }
2552
2553 if (src_cnt == 2 && dst[1] == src[1]) {
2554 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2555 &dst[1], src, 2, scf, len, flags);
2556 return sw_desc ? &sw_desc->async_tx : NULL;
2557 }
2558
2559 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2560 BUG_ON(!dst[0]);
2561 dst_cnt++;
2562 flags |= DMA_PREP_ZERO_P;
2563 }
2564
2565 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2566 BUG_ON(!dst[1]);
2567 dst_cnt++;
2568 flags |= DMA_PREP_ZERO_Q;
2569 }
2570
2571 BUG_ON(!dst_cnt);
2572
2573 dev_dbg(ppc440spe_chan->device->common.dev,
2574		"ppc440spe adma%d: %s src_cnt: %d len: %zu int_en: %d\n",
2575 ppc440spe_chan->device->id, __func__, src_cnt, len,
2576 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2577
2578 switch (ppc440spe_chan->device->id) {
2579 case PPC440SPE_DMA0_ID:
2580 case PPC440SPE_DMA1_ID:
2581 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2582 dst, dst_cnt, src, src_cnt, scf,
2583 len, flags);
2584 break;
2585
2586 case PPC440SPE_XOR_ID:
2587 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2588 dst, dst_cnt, src, src_cnt, scf,
2589 len, flags);
2590 break;
2591 }
2592
2593 return sw_desc ? &sw_desc->async_tx : NULL;
2594}
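
/*
 * RAID-6 clients normally reach this prep routine through the async_tx
 * layer; a sketch under the async_pq convention that blocks[] holds
 * disks-2 data pages followed by P and Q (the names are illustrative):
 *
 *	struct async_submit_ctl submit;
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, 0, disks, len, &submit);
 */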
2595
2596/**
2597 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2598 * a PQ_ZERO_SUM operation
2599 */
2600static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2601 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2602 unsigned int src_cnt, const unsigned char *scf, size_t len,
2603 enum sum_check_flags *pqres, unsigned long flags)
2604{
2605 struct ppc440spe_adma_chan *ppc440spe_chan;
2606 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2607 dma_addr_t pdest, qdest;
2608 int slot_cnt, slots_per_op, idst, dst_cnt;
2609
2610 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2611
2612 if (flags & DMA_PREP_PQ_DISABLE_P)
2613 pdest = 0;
2614 else
2615 pdest = pq[0];
2616
2617 if (flags & DMA_PREP_PQ_DISABLE_Q)
2618 qdest = 0;
2619 else
2620 qdest = pq[1];
2621
2622 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2623 src, src_cnt, scf));
2624
2625 /* Always use WXOR for P/Q calculations (two destinations).
2626 * Need 1 or 2 extra slots to verify results are zero.
2627 */
2628 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2629
2630 /* One additional slot per destination to clone P/Q
2631 * before calculation (we have to preserve destinations).
2632 */
2633 slot_cnt = src_cnt + dst_cnt * 2;
2634 slots_per_op = 1;
2635
2636 spin_lock_bh(&ppc440spe_chan->lock);
2637 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2638 slots_per_op);
2639 if (sw_desc) {
2640 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2641
2642 /* Setup byte count for each slot just allocated */
2643 sw_desc->async_tx.flags = flags;
2644 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2645 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2646 len);
2647 iter->unmap_len = len;
2648 }
2649
2650 if (pdest) {
2651 struct dma_cdb *hw_desc;
2652 struct ppc440spe_adma_chan *chan;
2653
2654 iter = sw_desc->group_head;
2655 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2656 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2657 iter->hw_next = list_entry(iter->chain_node.next,
2658 struct ppc440spe_adma_desc_slot,
2659 chain_node);
2660 hw_desc = iter->hw_desc;
2661 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2662 iter->src_cnt = 0;
2663 iter->dst_cnt = 0;
2664 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2665 ppc440spe_chan->pdest, 0);
2666 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
2667 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2668 len);
2669 iter->unmap_len = 0;
2670 /* override pdest to preserve original P */
2671 pdest = ppc440spe_chan->pdest;
2672 }
2673 if (qdest) {
2674 struct dma_cdb *hw_desc;
2675 struct ppc440spe_adma_chan *chan;
2676
2677 iter = list_first_entry(&sw_desc->group_list,
2678 struct ppc440spe_adma_desc_slot,
2679 chain_node);
2680 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2681
2682 if (pdest) {
2683 iter = list_entry(iter->chain_node.next,
2684 struct ppc440spe_adma_desc_slot,
2685 chain_node);
2686 }
2687
2688 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2689 iter->hw_next = list_entry(iter->chain_node.next,
2690 struct ppc440spe_adma_desc_slot,
2691 chain_node);
2692 hw_desc = iter->hw_desc;
2693 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2694 iter->src_cnt = 0;
2695 iter->dst_cnt = 0;
2696 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2697 ppc440spe_chan->qdest, 0);
2698 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
2699 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2700 len);
2701 iter->unmap_len = 0;
2702 /* override qdest to preserve original Q */
2703 qdest = ppc440spe_chan->qdest;
2704 }
2705
2706 /* Setup destinations for P/Q ops */
2707 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
2708
2709 /* Setup zero QWORDs into DCHECK CDBs */
2710 idst = dst_cnt;
2711 list_for_each_entry_reverse(iter, &sw_desc->group_list,
2712 chain_node) {
2713 /*
2714			 * The last CDB corresponds to the Q-parity check,
2715			 * the one before it corresponds to the
2716			 * P-parity check
2717 */
2718 if (idst == DMA_DEST_MAX_NUM) {
2719 if (idst == dst_cnt) {
2720 set_bit(PPC440SPE_DESC_QCHECK,
2721 &iter->flags);
2722 } else {
2723 set_bit(PPC440SPE_DESC_PCHECK,
2724 &iter->flags);
2725 }
2726 } else {
2727 if (qdest) {
2728 set_bit(PPC440SPE_DESC_QCHECK,
2729 &iter->flags);
2730 } else {
2731 set_bit(PPC440SPE_DESC_PCHECK,
2732 &iter->flags);
2733 }
2734 }
2735 iter->xor_check_result = pqres;
2736
2737 /*
2738			 * set it to zero; if the check fails, the result
2739			 * will be updated
2740 */
2741 *iter->xor_check_result = 0;
2742 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
2743 ppc440spe_qword);
2744
2745 if (!(--dst_cnt))
2746 break;
2747 }
2748
2749 /* Setup sources and mults for P/Q ops */
2750 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
2751 chain_node) {
2752 struct ppc440spe_adma_chan *chan;
2753 u32 mult_dst;
2754
2755 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2756 ppc440spe_desc_set_src_addr(iter, chan, 0,
2757 DMA_CUED_XOR_HB,
2758 src[src_cnt - 1]);
2759 if (qdest) {
2760 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
2761 DMA_CDB_SG_DST1;
2762 ppc440spe_desc_set_src_mult(iter, chan,
2763 DMA_CUED_MULT1_OFF,
2764 mult_dst,
2765 scf[src_cnt - 1]);
2766 }
2767 if (!(--src_cnt))
2768 break;
2769 }
2770 }
2771 spin_unlock_bh(&ppc440spe_chan->lock);
2772 return sw_desc ? &sw_desc->async_tx : NULL;
2773}
2774
2775/**
2776 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
2777 * XOR ZERO_SUM operation
2778 */
2779static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
2780 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
2781 size_t len, enum sum_check_flags *result, unsigned long flags)
2782{
2783 struct dma_async_tx_descriptor *tx;
2784 dma_addr_t pq[2];
2785
2786 /* validate P, disable Q */
2787 pq[0] = src[0];
2788 pq[1] = 0;
2789 flags |= DMA_PREP_PQ_DISABLE_Q;
2790
2791 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
2792					src_cnt - 1, NULL, len,
2793 result, flags);
2794 return tx;
2795}
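
/*
 * In effect an N-source XOR check is recast here as a P-only PQ check:
 * src[0] stands in for P, sources 1..N-1 are accumulated against it,
 * and *result ends up non-zero iff the XOR of all N sources is
 * non-zero.
 */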
2796
2797/**
2798 * ppc440spe_adma_set_dest - set destination address into descriptor
2799 */
2800static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2801 dma_addr_t addr, int index)
2802{
2803 struct ppc440spe_adma_chan *chan;
2804
2805 BUG_ON(index >= sw_desc->dst_cnt);
2806
2807 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2808
2809 switch (chan->device->id) {
2810 case PPC440SPE_DMA0_ID:
2811 case PPC440SPE_DMA1_ID:
2812		/* TODO: support transfer lengths >
2813		 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
2814		 */
2815 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
2816 chan, 0, addr, index);
2817 break;
2818 case PPC440SPE_XOR_ID:
2819 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
2820 ppc440spe_desc_set_dest_addr(sw_desc,
2821 chan, 0, addr, index);
2822 break;
2823 }
2824}
2825
2826static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
2827 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
2828{
2829	/* To clear the destination, update the descriptor
2830	 * (P or Q depending on index) as follows:
2831	 * addr is the destination (0 corresponds to SG2):
2832 */
2833 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
2834
2835 /* ... and the addr is source: */
2836 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
2837
2838 /* addr is always SG2 then the mult is always DST1 */
2839 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2840 DMA_CDB_SG_DST1, 1);
2841}
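
/*
 * Numerically (an informal check of the trick above): with 'addr' as
 * both destination and source and a GF multiplier of 1, each byte
 * becomes old ^ (1 . old) = 0, so the engine clears P or Q in place
 * without a separate memset.
 */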
2842
2843/**
2844 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
2845 * for the PQXOR operation
2846 */
2847static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2848 dma_addr_t *addrs, unsigned long flags)
2849{
2850 struct ppc440spe_adma_desc_slot *iter;
2851 struct ppc440spe_adma_chan *chan;
2852 dma_addr_t paddr, qaddr;
2853 dma_addr_t addr = 0, ppath, qpath;
2854 int index = 0, i;
2855
2856 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2857
2858 if (flags & DMA_PREP_PQ_DISABLE_P)
2859 paddr = 0;
2860 else
2861 paddr = addrs[0];
2862
2863 if (flags & DMA_PREP_PQ_DISABLE_Q)
2864 qaddr = 0;
2865 else
2866 qaddr = addrs[1];
2867
2868 if (!paddr || !qaddr)
2869 addr = paddr ? paddr : qaddr;
2870
2871 switch (chan->device->id) {
2872 case PPC440SPE_DMA0_ID:
2873 case PPC440SPE_DMA1_ID:
2874 /* walk through the WXOR source list and set P/Q-destinations
2875 * for each slot:
2876 */
2877 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
2878 /* This is WXOR-only chain; may have 1/2 zero descs */
2879 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
2880 index++;
2881 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
2882 index++;
2883
2884 iter = ppc440spe_get_group_entry(sw_desc, index);
2885 if (addr) {
2886 /* one destination */
2887 list_for_each_entry_from(iter,
2888 &sw_desc->group_list, chain_node)
2889 ppc440spe_desc_set_dest_addr(iter, chan,
2890 DMA_CUED_XOR_BASE, addr, 0);
2891 } else {
2892 /* two destinations */
2893 list_for_each_entry_from(iter,
2894 &sw_desc->group_list, chain_node) {
2895 ppc440spe_desc_set_dest_addr(iter, chan,
2896 DMA_CUED_XOR_BASE, paddr, 0);
2897 ppc440spe_desc_set_dest_addr(iter, chan,
2898 DMA_CUED_XOR_BASE, qaddr, 1);
2899 }
2900 }
2901
2902 if (index) {
2903 /* To clear destinations update the descriptor
2904 * (1st,2nd, or both depending on flags)
2905 */
2906 index = 0;
2907 if (test_bit(PPC440SPE_ZERO_P,
2908 &sw_desc->flags)) {
2909 iter = ppc440spe_get_group_entry(
2910 sw_desc, index++);
2911 ppc440spe_adma_pq_zero_op(iter, chan,
2912 paddr);
2913 }
2914
2915 if (test_bit(PPC440SPE_ZERO_Q,
2916 &sw_desc->flags)) {
2917 iter = ppc440spe_get_group_entry(
2918 sw_desc, index++);
2919 ppc440spe_adma_pq_zero_op(iter, chan,
2920 qaddr);
2921 }
2922
2923 return;
2924 }
2925 } else {
2926 /* This is RXOR-only or RXOR/WXOR mixed chain */
2927
2928 /* If we want to include destination into calculations,
2929 * then make dest addresses cued with mult=1 (XOR).
2930 */
2931 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
2932 DMA_CUED_XOR_HB :
2933 DMA_CUED_XOR_BASE |
2934 (1 << DMA_CUED_MULT1_OFF);
2935 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
2936 DMA_CUED_XOR_HB :
2937 DMA_CUED_XOR_BASE |
2938 (1 << DMA_CUED_MULT1_OFF);
2939
2940 /* Setup destination(s) in RXOR slot(s) */
2941 iter = ppc440spe_get_group_entry(sw_desc, index++);
2942 ppc440spe_desc_set_dest_addr(iter, chan,
2943 paddr ? ppath : qpath,
2944 paddr ? paddr : qaddr, 0);
2945 if (!addr) {
2946 /* two destinations */
2947 iter = ppc440spe_get_group_entry(sw_desc,
2948 index++);
2949 ppc440spe_desc_set_dest_addr(iter, chan,
2950 qpath, qaddr, 0);
2951 }
2952
2953 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
2954 /* Setup destination(s) in remaining WXOR
2955 * slots
2956 */
2957 iter = ppc440spe_get_group_entry(sw_desc,
2958 index);
2959 if (addr) {
2960 /* one destination */
2961 list_for_each_entry_from(iter,
2962 &sw_desc->group_list,
2963 chain_node)
2964 ppc440spe_desc_set_dest_addr(
2965 iter, chan,
2966 DMA_CUED_XOR_BASE,
2967 addr, 0);
2968
2969 } else {
2970 /* two destinations */
2971 list_for_each_entry_from(iter,
2972 &sw_desc->group_list,
2973 chain_node) {
2974 ppc440spe_desc_set_dest_addr(
2975 iter, chan,
2976 DMA_CUED_XOR_BASE,
2977 paddr, 0);
2978 ppc440spe_desc_set_dest_addr(
2979 iter, chan,
2980 DMA_CUED_XOR_BASE,
2981 qaddr, 1);
2982 }
2983 }
2984 }
2985
2986 }
2987 break;
2988
2989 case PPC440SPE_XOR_ID:
2990 /* DMA2 descriptors have only 1 destination, so there are
2991 * two chains - one for each dest.
2992 * If we want to include destination into calculations,
2993 * then make dest addresses cued with mult=1 (XOR).
2994 */
2995 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
2996 DMA_CUED_XOR_HB :
2997 DMA_CUED_XOR_BASE |
2998 (1 << DMA_CUED_MULT1_OFF);
2999
3000 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3001 DMA_CUED_XOR_HB :
3002 DMA_CUED_XOR_BASE |
3003 (1 << DMA_CUED_MULT1_OFF);
3004
3005 iter = ppc440spe_get_group_entry(sw_desc, 0);
3006 for (i = 0; i < sw_desc->descs_per_op; i++) {
3007 ppc440spe_desc_set_dest_addr(iter, chan,
3008 paddr ? ppath : qpath,
3009 paddr ? paddr : qaddr, 0);
3010 iter = list_entry(iter->chain_node.next,
3011 struct ppc440spe_adma_desc_slot,
3012 chain_node);
3013 }
3014
3015 if (!addr) {
3016 /* Two destinations; setup Q here */
3017 iter = ppc440spe_get_group_entry(sw_desc,
3018 sw_desc->descs_per_op);
3019 for (i = 0; i < sw_desc->descs_per_op; i++) {
3020 ppc440spe_desc_set_dest_addr(iter,
3021 chan, qpath, qaddr, 0);
3022 iter = list_entry(iter->chain_node.next,
3023 struct ppc440spe_adma_desc_slot,
3024 chain_node);
3025 }
3026 }
3027
3028 break;
3029 }
3030}
3031
3032/**
3033 * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
3034 * for the PQ_ZERO_SUM operation
3035 */
3036static void ppc440spe_adma_pqzero_sum_set_dest(
3037 struct ppc440spe_adma_desc_slot *sw_desc,
3038 dma_addr_t paddr, dma_addr_t qaddr)
3039{
3040 struct ppc440spe_adma_desc_slot *iter, *end;
3041 struct ppc440spe_adma_chan *chan;
3042 dma_addr_t addr = 0;
3043 int idx;
3044
3045 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3046
3047 /* walk through the WXOR source list and set P/Q-destinations
3048 * for each slot
3049 */
3050 idx = (paddr && qaddr) ? 2 : 1;
3051 /* set end */
3052 list_for_each_entry_reverse(end, &sw_desc->group_list,
3053 chain_node) {
3054 if (!(--idx))
3055 break;
3056 }
3057 /* set start */
3058 idx = (paddr && qaddr) ? 2 : 1;
3059 iter = ppc440spe_get_group_entry(sw_desc, idx);
3060
3061 if (paddr && qaddr) {
3062 /* two destinations */
3063 list_for_each_entry_from(iter, &sw_desc->group_list,
3064 chain_node) {
3065 if (unlikely(iter == end))
3066 break;
3067 ppc440spe_desc_set_dest_addr(iter, chan,
3068 DMA_CUED_XOR_BASE, paddr, 0);
3069 ppc440spe_desc_set_dest_addr(iter, chan,
3070 DMA_CUED_XOR_BASE, qaddr, 1);
3071 }
3072 } else {
3073 /* one destination */
3074 addr = paddr ? paddr : qaddr;
3075 list_for_each_entry_from(iter, &sw_desc->group_list,
3076 chain_node) {
3077 if (unlikely(iter == end))
3078 break;
3079 ppc440spe_desc_set_dest_addr(iter, chan,
3080 DMA_CUED_XOR_BASE, addr, 0);
3081 }
3082 }
3083
3084	/* The remaining descriptors are DATACHECK. They need no
3085	 * destination; their destination fields are actually used
3086	 * as sources for the check operation. So, set addr as a source.
3087 */
3088 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3089
3090 if (!addr) {
3091 end = list_entry(end->chain_node.next,
3092 struct ppc440spe_adma_desc_slot, chain_node);
3093 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3094 }
3095}
3096
3097/**
3098 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3099 */
3100static inline void ppc440spe_desc_set_xor_src_cnt(
3101 struct ppc440spe_adma_desc_slot *desc,
3102 int src_cnt)
3103{
3104 struct xor_cb *hw_desc = desc->hw_desc;
3105
3106 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3107 hw_desc->cbc |= src_cnt;
3108}
3109
3110/**
3111 * ppc440spe_adma_pq_set_src - set source address into descriptor
3112 */
3113static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3114 dma_addr_t addr, int index)
3115{
3116 struct ppc440spe_adma_chan *chan;
3117 dma_addr_t haddr = 0;
3118 struct ppc440spe_adma_desc_slot *iter = NULL;
3119
3120 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3121
3122 switch (chan->device->id) {
3123 case PPC440SPE_DMA0_ID:
3124 case PPC440SPE_DMA1_ID:
3125 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
3126 */
3127 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3128 /* RXOR-only or RXOR/WXOR operation */
3129 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3130 &sw_desc->flags) ? 2 : 3;
3131
3132 if (index == 0) {
3133 /* 1st slot (RXOR) */
3134 /* setup sources region (R1-2-3, R1-2-4,
3135 * or R1-2-5)
3136 */
3137 if (test_bit(PPC440SPE_DESC_RXOR12,
3138 &sw_desc->flags))
3139 haddr = DMA_RXOR12 <<
3140 DMA_CUED_REGION_OFF;
3141 else if (test_bit(PPC440SPE_DESC_RXOR123,
3142 &sw_desc->flags))
3143 haddr = DMA_RXOR123 <<
3144 DMA_CUED_REGION_OFF;
3145 else if (test_bit(PPC440SPE_DESC_RXOR124,
3146 &sw_desc->flags))
3147 haddr = DMA_RXOR124 <<
3148 DMA_CUED_REGION_OFF;
3149 else if (test_bit(PPC440SPE_DESC_RXOR125,
3150 &sw_desc->flags))
3151 haddr = DMA_RXOR125 <<
3152 DMA_CUED_REGION_OFF;
3153 else
3154 BUG();
3155 haddr |= DMA_CUED_XOR_BASE;
3156 iter = ppc440spe_get_group_entry(sw_desc, 0);
3157 } else if (index < iskip) {
3158				/* the 1st (RXOR) slot sets the source
3159				 * address only once, so skip sources
3160				 * 1 .. <iskip>-1 here
3161				 */
3162				iter = NULL;
3163			} else {
3164				/* 2nd/3rd and subsequent slots (WXOR);
3165				 * skip the first slot, which holds the RXOR
3166				 */
3167 haddr = DMA_CUED_XOR_HB;
3168 iter = ppc440spe_get_group_entry(sw_desc,
3169 index - iskip + sw_desc->dst_cnt);
3170 }
3171 } else {
3172 int znum = 0;
3173
3174 /* WXOR-only operation; skip first slots with
3175 * zeroing destinations
3176 */
3177 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3178 znum++;
3179 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3180 znum++;
3181
3182 haddr = DMA_CUED_XOR_HB;
3183 iter = ppc440spe_get_group_entry(sw_desc,
3184 index + znum);
3185 }
3186
3187 if (likely(iter)) {
3188 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3189
3190 if (!index &&
3191 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3192 sw_desc->dst_cnt == 2) {
3193 /* if we have two destinations for RXOR, then
3194				 * set up the source in the second descriptor too
3195 */
3196 iter = ppc440spe_get_group_entry(sw_desc, 1);
3197 ppc440spe_desc_set_src_addr(iter, chan, 0,
3198 haddr, addr);
3199 }
3200 }
3201 break;
3202
3203 case PPC440SPE_XOR_ID:
3204 /* DMA2 may do Biskup */
3205 iter = sw_desc->group_head;
3206 if (iter->dst_cnt == 2) {
3207 /* both P & Q calculations required; set P src here */
3208 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3209
3210 /* this is for Q */
3211 iter = ppc440spe_get_group_entry(sw_desc,
3212 sw_desc->descs_per_op);
3213 }
3214 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3215 break;
3216 }
3217}
3218
3219/**
3220 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3221 */
3222static void ppc440spe_adma_memcpy_xor_set_src(
3223 struct ppc440spe_adma_desc_slot *sw_desc,
3224 dma_addr_t addr, int index)
3225{
3226 struct ppc440spe_adma_chan *chan;
3227
3228 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3229 sw_desc = sw_desc->group_head;
3230
3231 if (likely(sw_desc))
3232 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3233}
3234
3235/**
3236 * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor to the next operand/CDB
3237 */
3238static void ppc440spe_adma_dma2rxor_inc_addr(
3239 struct ppc440spe_adma_desc_slot *desc,
3240 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3241{
3242 cursor->addr_count++;
3243 if (index == src_cnt - 1) {
3244 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3245 } else if (cursor->addr_count == XOR_MAX_OPS) {
3246 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3247 cursor->addr_count = 0;
3248 cursor->desc_count++;
3249 }
3250}
3251
3252/**
3253 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3254 */
3255static int ppc440spe_adma_dma2rxor_prep_src(
3256 struct ppc440spe_adma_desc_slot *hdesc,
3257 struct ppc440spe_rxor *cursor, int index,
3258 int src_cnt, u32 addr)
3259{
3260 int rval = 0;
3261 u32 sign;
3262 struct ppc440spe_adma_desc_slot *desc = hdesc;
3263 int i;
3264
3265 for (i = 0; i < cursor->desc_count; i++) {
3266 desc = list_entry(hdesc->chain_node.next,
3267 struct ppc440spe_adma_desc_slot,
3268 chain_node);
3269 }
3270
3271 switch (cursor->state) {
3272 case 0:
3273 if (addr == cursor->addrl + cursor->len) {
3274 /* direct RXOR */
3275 cursor->state = 1;
3276 cursor->xor_count++;
3277 if (index == src_cnt-1) {
3278 ppc440spe_rxor_set_region(desc,
3279 cursor->addr_count,
3280 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3281 ppc440spe_adma_dma2rxor_inc_addr(
3282 desc, cursor, index, src_cnt);
3283 }
3284 } else if (cursor->addrl == addr + cursor->len) {
3285 /* reverse RXOR */
3286 cursor->state = 1;
3287 cursor->xor_count++;
3288 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3289 if (index == src_cnt-1) {
3290 ppc440spe_rxor_set_region(desc,
3291 cursor->addr_count,
3292 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3293 ppc440spe_adma_dma2rxor_inc_addr(
3294 desc, cursor, index, src_cnt);
3295 }
3296 } else {
3297 printk(KERN_ERR "Cannot build "
3298 "DMA2 RXOR command block.\n");
3299 BUG();
3300 }
3301 break;
3302 case 1:
3303 sign = test_bit(cursor->addr_count,
3304 desc->reverse_flags)
3305 ? -1 : 1;
3306 if (index == src_cnt-2 || (sign == -1
3307 && addr != cursor->addrl - 2*cursor->len)) {
3308 cursor->state = 0;
3309 cursor->xor_count = 1;
3310 cursor->addrl = addr;
3311 ppc440spe_rxor_set_region(desc,
3312 cursor->addr_count,
3313 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3314 ppc440spe_adma_dma2rxor_inc_addr(
3315 desc, cursor, index, src_cnt);
3316 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3317 cursor->state = 2;
3318 cursor->xor_count = 0;
3319 ppc440spe_rxor_set_region(desc,
3320 cursor->addr_count,
3321 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3322 if (index == src_cnt-1) {
3323 ppc440spe_adma_dma2rxor_inc_addr(
3324 desc, cursor, index, src_cnt);
3325 }
3326 } else if (addr == cursor->addrl + 3*cursor->len) {
3327 cursor->state = 2;
3328 cursor->xor_count = 0;
3329 ppc440spe_rxor_set_region(desc,
3330 cursor->addr_count,
3331 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3332 if (index == src_cnt-1) {
3333 ppc440spe_adma_dma2rxor_inc_addr(
3334 desc, cursor, index, src_cnt);
3335 }
3336 } else if (addr == cursor->addrl + 4*cursor->len) {
3337 cursor->state = 2;
3338 cursor->xor_count = 0;
3339 ppc440spe_rxor_set_region(desc,
3340 cursor->addr_count,
3341 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3342 if (index == src_cnt-1) {
3343 ppc440spe_adma_dma2rxor_inc_addr(
3344 desc, cursor, index, src_cnt);
3345 }
3346 } else {
3347 cursor->state = 0;
3348 cursor->xor_count = 1;
3349 cursor->addrl = addr;
3350 ppc440spe_rxor_set_region(desc,
3351 cursor->addr_count,
3352 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3353 ppc440spe_adma_dma2rxor_inc_addr(
3354 desc, cursor, index, src_cnt);
3355 }
3356 break;
3357 case 2:
3358 cursor->state = 0;
3359 cursor->addrl = addr;
3360 cursor->xor_count++;
3361 if (index) {
3362 ppc440spe_adma_dma2rxor_inc_addr(
3363 desc, cursor, index, src_cnt);
3364 }
3365 break;
3366 }
3367
3368 return rval;
3369}
3370
3371/**
3372 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
3373 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3374 */
3375static void ppc440spe_adma_dma2rxor_set_src(
3376 struct ppc440spe_adma_desc_slot *desc,
3377 int index, dma_addr_t addr)
3378{
3379 struct xor_cb *xcb = desc->hw_desc;
3380 int k = 0, op = 0, lop = 0;
3381
3382 /* get the RXOR operand which corresponds to index addr */
3383 while (op <= index) {
3384 lop = op;
3385 if (k == XOR_MAX_OPS) {
3386 k = 0;
3387 desc = list_entry(desc->chain_node.next,
3388 struct ppc440spe_adma_desc_slot, chain_node);
3389 xcb = desc->hw_desc;
3390
3391 }
3392 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3393 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3394 op += 2;
3395 else
3396 op += 3;
3397 }
3398
3399 BUG_ON(k < 1);
3400
3401 if (test_bit(k-1, desc->reverse_flags)) {
3402 /* reverse operand order; put last op in RXOR group */
3403 if (index == op - 1)
3404 ppc440spe_rxor_set_src(desc, k - 1, addr);
3405 } else {
3406 /* direct operand order; put first op in RXOR group */
3407 if (index == lop)
3408 ppc440spe_rxor_set_src(desc, k - 1, addr);
3409 }
3410}
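
/*
 * Illustration of the operand walk above (group sizes follow the
 * region codes): an RXOR12 operand covers two sources, an
 * RXOR123/124/125 operand covers three. For index = 4 behind a leading
 * RXOR123 group, op steps 0 -> 3 -> 5 (or 6) and the walk stops at the
 * second operand, k == 2, which is then programmed through
 * ppc440spe_rxor_set_src().
 */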
3411
3412/**
3413 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
3414 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3415 */
3416static void ppc440spe_adma_dma2rxor_set_mult(
3417 struct ppc440spe_adma_desc_slot *desc,
3418 int index, u8 mult)
3419{
3420 struct xor_cb *xcb = desc->hw_desc;
3421 int k = 0, op = 0, lop = 0;
3422
3423 /* get the RXOR operand which corresponds to index mult */
3424 while (op <= index) {
3425 lop = op;
3426 if (k == XOR_MAX_OPS) {
3427 k = 0;
3428 desc = list_entry(desc->chain_node.next,
3429 struct ppc440spe_adma_desc_slot,
3430 chain_node);
3431 xcb = desc->hw_desc;
3432
3433 }
3434 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3435 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3436 op += 2;
3437 else
3438 op += 3;
3439 }
3440
3441 BUG_ON(k < 1);
3442 if (test_bit(k-1, desc->reverse_flags)) {
3443 /* reverse order */
3444 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3445 } else {
3446 /* direct order */
3447 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3448 }
3449}
3450
3451/**
3452 * ppc440spe_init_rxor_cursor - reset an RXOR cursor to its initial state
3453 */
3454static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3455{
3456 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3457 cursor->state = 2;
3458}
3459
3460/**
3461 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3462 * descriptor for the PQXOR operation
3463 */
3464static void ppc440spe_adma_pq_set_src_mult(
3465 struct ppc440spe_adma_desc_slot *sw_desc,
3466 unsigned char mult, int index, int dst_pos)
3467{
3468 struct ppc440spe_adma_chan *chan;
3469 u32 mult_idx, mult_dst;
3470 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3471
3472 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3473
3474 switch (chan->device->id) {
3475 case PPC440SPE_DMA0_ID:
3476 case PPC440SPE_DMA1_ID:
3477 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3478 int region = test_bit(PPC440SPE_DESC_RXOR12,
3479 &sw_desc->flags) ? 2 : 3;
3480
3481 if (index < region) {
3482 /* RXOR multipliers */
3483 iter = ppc440spe_get_group_entry(sw_desc,
3484 sw_desc->dst_cnt - 1);
3485 if (sw_desc->dst_cnt == 2)
3486 iter1 = ppc440spe_get_group_entry(
3487 sw_desc, 0);
3488
3489 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3490 mult_dst = DMA_CDB_SG_SRC;
3491 } else {
3492 /* WXOR multiplier */
3493 iter = ppc440spe_get_group_entry(sw_desc,
3494 index - region +
3495 sw_desc->dst_cnt);
3496 mult_idx = DMA_CUED_MULT1_OFF;
3497 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3498 DMA_CDB_SG_DST1;
3499 }
3500 } else {
3501 int znum = 0;
3502
3503 /* WXOR-only;
3504			 * skip the first slots, which hold the destinations
3505			 * (if destination zeroing takes place)
3506 */
3507 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3508 znum++;
3509 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3510 znum++;
3511
3512 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3513 mult_idx = DMA_CUED_MULT1_OFF;
3514 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3515 }
3516
3517 if (likely(iter)) {
3518 ppc440spe_desc_set_src_mult(iter, chan,
3519 mult_idx, mult_dst, mult);
3520
3521 if (unlikely(iter1)) {
3522 /* if we have two destinations for RXOR, then
3523				 * we've just set the Q mult. Set up P now.
3524 */
3525 ppc440spe_desc_set_src_mult(iter1, chan,
3526 mult_idx, mult_dst, 1);
3527 }
3528
3529 }
3530 break;
3531
3532 case PPC440SPE_XOR_ID:
3533 iter = sw_desc->group_head;
3534 if (sw_desc->dst_cnt == 2) {
3535 /* both P & Q calculations required; set P mult here */
3536 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3537
3538 /* and then set Q mult */
3539 iter = ppc440spe_get_group_entry(sw_desc,
3540 sw_desc->descs_per_op);
3541 }
3542 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3543 break;
3544 }
3545}
3546
3547/**
3548 * ppc440spe_adma_free_chan_resources - free the resources allocated for the channel
3549 */
3550static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3551{
3552 struct ppc440spe_adma_chan *ppc440spe_chan;
3553 struct ppc440spe_adma_desc_slot *iter, *_iter;
3554 int in_use_descs = 0;
3555
3556 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3557 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3558
3559 spin_lock_bh(&ppc440spe_chan->lock);
3560 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3561 chain_node) {
3562 in_use_descs++;
3563 list_del(&iter->chain_node);
3564 }
3565 list_for_each_entry_safe_reverse(iter, _iter,
3566 &ppc440spe_chan->all_slots, slot_node) {
3567 list_del(&iter->slot_node);
3568 kfree(iter);
3569 ppc440spe_chan->slots_allocated--;
3570 }
3571 ppc440spe_chan->last_used = NULL;
3572
3573 dev_dbg(ppc440spe_chan->device->common.dev,
3574 "ppc440spe adma%d %s slots_allocated %d\n",
3575 ppc440spe_chan->device->id,
3576 __func__, ppc440spe_chan->slots_allocated);
3577 spin_unlock_bh(&ppc440spe_chan->lock);
3578
3579	/* one is OK since we left it there on purpose */
3580 if (in_use_descs > 1)
3581 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3582 in_use_descs - 1);
3583}
3584
3585/**
3586 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3587 * @chan: ADMA channel handle
3588 * @cookie: ADMA transaction identifier
3589 * @txstate: a holder for the current state of the channel
3590 */
3591static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3592 dma_cookie_t cookie, struct dma_tx_state *txstate)
3593{
3594 struct ppc440spe_adma_chan *ppc440spe_chan;
3595 enum dma_status ret;
3596
3597 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3598	ret = dma_cookie_status(chan, cookie, txstate);
3599	if (ret == DMA_COMPLETE)
3600 return ret;
3601
3602 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3603
3604	return dma_cookie_status(chan, cookie, txstate);
3605}
3606
3607/**
3608 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3609 */
3610static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3611{
3612 struct ppc440spe_adma_chan *chan = data;
3613
3614 dev_dbg(chan->device->common.dev,
3615 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3616
3617 tasklet_schedule(&chan->irq_tasklet);
3618 ppc440spe_adma_device_clear_eot_status(chan);
3619
3620 return IRQ_HANDLED;
3621}
3622
3623/**
3624 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3625 * do the same things as an EOT handler
3626 */
3627static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3628{
3629 struct ppc440spe_adma_chan *chan = data;
3630
3631 dev_dbg(chan->device->common.dev,
3632 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3633
3634 tasklet_schedule(&chan->irq_tasklet);
3635 ppc440spe_adma_device_clear_eot_status(chan);
3636
3637 return IRQ_HANDLED;
3638}
3639
3640/**
3641 * ppc440spe_test_callback - called when the test operation completes
3642 */
3643static void ppc440spe_test_callback(void *unused)
3644{
3645 complete(&ppc440spe_r6_test_comp);
3646}
3647
3648/**
3649 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
3650 */
3651static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
3652{
3653 struct ppc440spe_adma_chan *ppc440spe_chan;
3654
3655 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3656 dev_dbg(ppc440spe_chan->device->common.dev,
3657		"ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
3658 __func__, ppc440spe_chan->pending);
3659
3660 if (ppc440spe_chan->pending) {
3661 ppc440spe_chan->pending = 0;
3662 ppc440spe_chan_append(ppc440spe_chan);
3663 }
3664}
3665
3666/**
3667 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
3668 * use FIFOs (as opposed to the chains used by the XOR engine), so this is
3669 * an XOR-specific operation)
3670 */
3671static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
3672{
3673 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
3674 dma_cookie_t cookie;
3675 int slot_cnt, slots_per_op;
3676
3677 dev_dbg(chan->device->common.dev,
3678 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3679
3680 spin_lock_bh(&chan->lock);
3681 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
3682 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
3683 if (sw_desc) {
3684 group_start = sw_desc->group_head;
3685 list_splice_init(&sw_desc->group_list, &chan->chain);
3686 async_tx_ack(&sw_desc->async_tx);
3687 ppc440spe_desc_init_null_xor(group_start);
3688
3689		cookie = dma_cookie_assign(&sw_desc->async_tx);
3690
3691 /* initialize the completed cookie to be less than
3692 * the most recently used cookie
3693 */
3694		chan->common.completed_cookie = cookie - 1;
3695
3696 /* channel should not be busy */
3697 BUG_ON(ppc440spe_chan_is_busy(chan));
3698
3699 /* set the descriptor address */
3700 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
3701
3702 /* run the descriptor */
3703 ppc440spe_chan_run(chan);
3704 } else
3705 printk(KERN_ERR "ppc440spe adma%d"
3706 " failed to allocate null descriptor\n",
3707 chan->device->id);
3708 spin_unlock_bh(&chan->lock);
3709}
3710
3711/**
3712 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully.
3713 * For this we just perform one WXOR operation with the same source
3714 * and destination addresses, the GF-multiplier is 1; so if RAID-6
3715 * capabilities are enabled then we'll get src/dst filled with zero.
3716 */
3717static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
3718{
3719 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
3720 struct page *pg;
3721 char *a;
3722 dma_addr_t dma_addr, addrs[2];
3723 unsigned long op = 0;
3724 int rval = 0;
3725
3726 set_bit(PPC440SPE_DESC_WXOR, &op);
3727
3728 pg = alloc_page(GFP_KERNEL);
3729 if (!pg)
3730 return -ENOMEM;
3731
3732 spin_lock_bh(&chan->lock);
3733 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
3734 if (sw_desc) {
3735		/* 1 src, 1 dst, int_ena, WXOR */
3736 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
3737 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
3738 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
3739 iter->unmap_len = PAGE_SIZE;
3740 }
3741 } else {
3742 rval = -EFAULT;
3743 spin_unlock_bh(&chan->lock);
3744 goto exit;
3745 }
3746 spin_unlock_bh(&chan->lock);
3747
3748 /* Fill the test page with ones */
3749 memset(page_address(pg), 0xFF, PAGE_SIZE);
3750 dma_addr = dma_map_page(chan->device->dev, pg, 0,
3751 PAGE_SIZE, DMA_BIDIRECTIONAL);
3752
3753 /* Setup addresses */
3754 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
3755 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
3756 addrs[0] = dma_addr;
3757 addrs[1] = 0;
3758 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
3759
3760 async_tx_ack(&sw_desc->async_tx);
3761 sw_desc->async_tx.callback = ppc440spe_test_callback;
3762 sw_desc->async_tx.callback_param = NULL;
3763
3764 init_completion(&ppc440spe_r6_test_comp);
3765
3766 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
3767 ppc440spe_adma_issue_pending(&chan->common);
3768
3769 wait_for_completion(&ppc440spe_r6_test_comp);
3770
3771 /* Now check if the test page is zeroed */
3772 a = page_address(pg);
3773 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
3774 /* page is zero - RAID-6 enabled */
3775 rval = 0;
3776 } else {
3777 /* RAID-6 was not enabled */
3778 rval = -EINVAL;
3779 }
3780exit:
3781 __free_page(pg);
3782 return rval;
3783}
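
/*
 * The arithmetic behind the test above, assuming the usual GF(2^8) WXOR
 * semantics (dst ^= gf_mul(g, src)): with the multiplier g = 1 and
 * dst == src the engine computes
 *
 *	dst = D XOR (1 * D) = 0
 *
 * so the 0xFF-filled page must read back as all zeroes. If the key written
 * to DCRN_MQ0_XORBA was wrong, RAID-6 stays disabled and the page is left
 * unmodified, which is exactly what the memcmp() check detects.
 */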

static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
{
	switch (adev->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
		dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
		dma_cap_set(DMA_PQ, adev->common.cap_mask);
		dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
		dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
		break;
	case PPC440SPE_XOR_ID:
		dma_cap_set(DMA_XOR, adev->common.cap_mask);
		dma_cap_set(DMA_PQ, adev->common.cap_mask);
		dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
		break;
	}

	/* Set base routines */
	adev->common.device_alloc_chan_resources =
		ppc440spe_adma_alloc_chan_resources;
	adev->common.device_free_chan_resources =
		ppc440spe_adma_free_chan_resources;
	adev->common.device_tx_status = ppc440spe_adma_tx_status;
	adev->common.device_issue_pending = ppc440spe_adma_issue_pending;

	/* Set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
		adev->common.device_prep_dma_memcpy =
			ppc440spe_adma_prep_dma_memcpy;
	}
	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
		adev->common.max_xor = XOR_MAX_OPS;
		adev->common.device_prep_dma_xor =
			ppc440spe_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
		switch (adev->id) {
		case PPC440SPE_DMA0_ID:
			dma_set_maxpq(&adev->common,
				DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
			break;
		case PPC440SPE_DMA1_ID:
			dma_set_maxpq(&adev->common,
				DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
			break;
		case PPC440SPE_XOR_ID:
			adev->common.max_pq = XOR_MAX_OPS * 3;
			break;
		}
		adev->common.device_prep_dma_pq =
			ppc440spe_adma_prep_dma_pq;
	}
	if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
		switch (adev->id) {
		case PPC440SPE_DMA0_ID:
			adev->common.max_pq = DMA0_FIFO_SIZE /
						sizeof(struct dma_cdb);
			break;
		case PPC440SPE_DMA1_ID:
			adev->common.max_pq = DMA1_FIFO_SIZE /
						sizeof(struct dma_cdb);
			break;
		}
		adev->common.device_prep_dma_pq_val =
			ppc440spe_adma_prep_dma_pqzero_sum;
	}
	if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
		switch (adev->id) {
		case PPC440SPE_DMA0_ID:
			adev->common.max_xor = DMA0_FIFO_SIZE /
						sizeof(struct dma_cdb);
			break;
		case PPC440SPE_DMA1_ID:
			adev->common.max_xor = DMA1_FIFO_SIZE /
						sizeof(struct dma_cdb);
			break;
		}
		adev->common.device_prep_dma_xor_val =
			ppc440spe_adma_prep_dma_xor_zero_sum;
	}
	if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
		adev->common.device_prep_dma_interrupt =
			ppc440spe_adma_prep_dma_interrupt;
	}
	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
		"( %s%s%s%s%s%s)\n",
		dev_name(adev->dev),
		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
		dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
		dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
		dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
		dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
}
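
/*
 * For reference, consumers do not call the prep routines above directly;
 * they go through the dmaengine/async_tx layer. A minimal sketch of a
 * client XOR request (illustrative only; 'dest', 'srcs' and 'src_cnt' are
 * caller-provided pages and count, and error handling is omitted):
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
 *	tx = async_xor(dest, srcs, 0, src_cnt, PAGE_SIZE, &submit);
 *
 * async_xor() picks a channel advertising DMA_XOR, so on 440SP(E) it may
 * land on the XOR engine registered here.
 */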

static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
				     struct ppc440spe_adma_chan *chan,
				     int *initcode)
{
	struct platform_device *ofdev;
	struct device_node *np;
	int ret;

	ofdev = container_of(adev->dev, struct platform_device, dev);
	np = ofdev->dev.of_node;
	if (adev->id != PPC440SPE_XOR_ID) {
		adev->err_irq = irq_of_parse_and_map(np, 1);
		if (adev->err_irq == NO_IRQ) {
			dev_warn(adev->dev, "no err irq resource?\n");
			*initcode = PPC_ADMA_INIT_IRQ2;
			adev->err_irq = -ENXIO;
		} else
			atomic_inc(&ppc440spe_adma_err_irq_ref);
	} else {
		adev->err_irq = -ENXIO;
	}

	adev->irq = irq_of_parse_and_map(np, 0);
	if (adev->irq == NO_IRQ) {
		dev_err(adev->dev, "no irq resource\n");
		*initcode = PPC_ADMA_INIT_IRQ1;
		ret = -ENXIO;
		goto err_irq_map;
	}
	dev_dbg(adev->dev, "irq %d, err irq %d\n",
		adev->irq, adev->err_irq);

	ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
			  0, dev_driver_string(adev->dev), chan);
	if (ret) {
		dev_err(adev->dev, "can't request irq %d\n",
			adev->irq);
		*initcode = PPC_ADMA_INIT_IRQ1;
		ret = -EIO;
		goto err_req1;
	}

	/* only the DMA engines have a separate error IRQ,
	 * so it's OK if err_irq < 0 in the XOR engine case.
	 */
	if (adev->err_irq > 0) {
		/* both DMA engines share a common error IRQ */
		ret = request_irq(adev->err_irq,
				  ppc440spe_adma_err_handler,
				  IRQF_SHARED,
				  dev_driver_string(adev->dev),
				  chan);
		if (ret) {
			dev_err(adev->dev, "can't request irq %d\n",
				adev->err_irq);
			*initcode = PPC_ADMA_INIT_IRQ2;
			ret = -EIO;
			goto err_req2;
		}
	}

	if (adev->id == PPC440SPE_XOR_ID) {
		/* enable XOR engine interrupts */
		iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
			    XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
			    &adev->xor_reg->ier);
	} else {
		u32 mask, enable;

		np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
		if (!np) {
			pr_err("%s: can't find I2O device tree node\n",
				__func__);
			ret = -ENODEV;
			goto err_req2;
		}
		adev->i2o_reg = of_iomap(np, 0);
		if (!adev->i2o_reg) {
			pr_err("%s: failed to map I2O registers\n", __func__);
			of_node_put(np);
			ret = -EINVAL;
			goto err_req2;
		}
		of_node_put(np);
		/* Unmask 'CS FIFO Attention' interrupts and
		 * enable generating interrupts on errors
		 */
		enable = (adev->id == PPC440SPE_DMA0_ID) ?
			 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
			 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
		mask = ioread32(&adev->i2o_reg->iopim) & enable;
		iowrite32(mask, &adev->i2o_reg->iopim);
	}
	return 0;

err_req2:
	free_irq(adev->irq, chan);
err_req1:
	irq_dispose_mapping(adev->irq);
err_irq_map:
	if (adev->err_irq > 0) {
		if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
			irq_dispose_mapping(adev->err_irq);
	}
	return ret;
}

static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
					struct ppc440spe_adma_chan *chan)
{
	u32 mask, disable;

	if (adev->id == PPC440SPE_XOR_ID) {
		/* disable XOR engine interrupts */
		mask = ioread32be(&adev->xor_reg->ier);
		mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
			  XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
		iowrite32be(mask, &adev->xor_reg->ier);
	} else {
		/* disable DMAx engine interrupts */
		disable = (adev->id == PPC440SPE_DMA0_ID) ?
			  (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
			  (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
		mask = ioread32(&adev->i2o_reg->iopim) | disable;
		iowrite32(mask, &adev->i2o_reg->iopim);
	}
	free_irq(adev->irq, chan);
	irq_dispose_mapping(adev->irq);
	if (adev->err_irq > 0) {
		free_irq(adev->err_irq, chan);
		if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
			irq_dispose_mapping(adev->err_irq);
			iounmap(adev->i2o_reg);
		}
	}
}

/**
 * ppc440spe_adma_probe - probe the async device
 */
static int ppc440spe_adma_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct resource res;
	struct ppc440spe_adma_device *adev;
	struct ppc440spe_adma_chan *chan;
	struct ppc_dma_chan_ref *ref, *_ref;
	int ret = 0, initcode = PPC_ADMA_INIT_OK;
	const u32 *idx;
	int len;
	void *regs;
	u32 id, pool_size;

	if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
		id = PPC440SPE_XOR_ID;
		/* As far as the XOR engine is concerned, it does not
		 * use FIFOs but a linked list, so the pool size to
		 * allocate does not depend on the engine configuration.
		 */
		pool_size = PAGE_SIZE << 1;
	} else {
		/* it is DMA0 or DMA1 */
		idx = of_get_property(np, "cell-index", &len);
		if (!idx || (len != sizeof(u32))) {
			dev_err(&ofdev->dev, "Device node %s has a missing "
				"or invalid cell-index property\n",
				np->full_name);
			return -EINVAL;
		}
		id = *idx;
		/* The DMA0/1 engines use a FIFO to maintain CDBs, so we
		 * should size the pool according to the depth of this
		 * FIFO: the pool must provide as many CDBs as the FIFO
		 * can hold CDB pointers. That is
		 *	CDB size = 32B;
		 *	CDBs number = (DMA0_FIFO_SIZE >> 3);
		 *	Pool size = CDBs number * CDB size =
		 *		  = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
		 */
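		/* Worked example, assuming a hypothetical 4 KB FIFO: it
		 * holds 4096 >> 3 = 512 CDB pointers, so the pool must
		 * carry 512 CDBs of 32 bytes each = 16 KB, which is
		 * exactly 4096 << 2.
		 */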
		pool_size = (id == PPC440SPE_DMA0_ID) ?
			    DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
		pool_size <<= 2;
	}

	if (of_address_to_resource(np, 0, &res)) {
		dev_err(&ofdev->dev, "failed to get memory resource\n");
		initcode = PPC_ADMA_INIT_MEMRES;
		ret = -ENODEV;
		goto out;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				dev_driver_string(&ofdev->dev))) {
		dev_err(&ofdev->dev, "failed to request memory region %pR\n",
			&res);
		initcode = PPC_ADMA_INIT_MEMREG;
		ret = -EBUSY;
		goto out;
	}

	/* create a device */
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev) {
		initcode = PPC_ADMA_INIT_ALLOC;
		ret = -ENOMEM;
		goto err_adev_alloc;
	}

	adev->id = id;
	adev->pool_size = pool_size;
	/* allocate coherent memory for hardware descriptors */
	adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
					adev->pool_size, &adev->dma_desc_pool,
					GFP_KERNEL);
	if (adev->dma_desc_pool_virt == NULL) {
		dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
			"memory for hardware descriptors\n",
			adev->pool_size);
		initcode = PPC_ADMA_INIT_COHERENT;
		ret = -ENOMEM;
		goto err_dma_alloc;
	}
	dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
		adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);

	regs = ioremap(res.start, resource_size(&res));
	if (!regs) {
		dev_err(&ofdev->dev, "failed to ioremap regs!\n");
		ret = -ENOMEM;
		goto err_regs_alloc;
	}

	if (adev->id == PPC440SPE_XOR_ID) {
		adev->xor_reg = regs;
		/* Reset XOR */
		iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
		iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
	} else {
		size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
				   DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
		adev->dma_reg = regs;
		/* DMAx_FIFO_SIZE is defined in bytes,
		 * <fsiz> is defined as a number of CDB pointers (8 bytes each).
		 * DMA FIFO Length = CSlength + CPlength, where
		 * CSlength = CPlength = (fsiz + 1) * 8.
		 */
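		/* For example (sizes illustrative): an 8 KB fifo_size
		 * gives fsiz = (8192 >> 3) - 2 = 1022, so each of the
		 * CS and CP FIFOs is (1022 + 1) * 8 = 8184 bytes, and
		 * both together stay within the 2 * fifo_size bytes
		 * reserved in ppc440spe_configure_raid_devices().
		 */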
		iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
			  &adev->dma_reg->fsiz);
		/* Configure DMA engine */
		iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
			  &adev->dma_reg->cfg);
		/* Clear Status */
		iowrite32(~0, &adev->dma_reg->dsts);
	}

	adev->dev = &ofdev->dev;
	adev->common.dev = &ofdev->dev;
	INIT_LIST_HEAD(&adev->common.channels);
	platform_set_drvdata(ofdev, adev);

	/* create a channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		initcode = PPC_ADMA_INIT_CHANNEL;
		ret = -ENOMEM;
		goto err_chan_alloc;
	}

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->chain);
	INIT_LIST_HEAD(&chan->all_slots);
	chan->device = adev;
	chan->common.device = &adev->common;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &adev->common.channels);
	tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
		     (unsigned long)chan);

	/* allocate and map helper pages for async validation or
	 * async_mult/async_sum_product operations on DMA0/1.
	 */
	if (adev->id != PPC440SPE_XOR_ID) {
		chan->pdest_page = alloc_page(GFP_KERNEL);
		chan->qdest_page = alloc_page(GFP_KERNEL);
		if (!chan->pdest_page ||
		    !chan->qdest_page) {
			if (chan->pdest_page)
				__free_page(chan->pdest_page);
			if (chan->qdest_page)
				__free_page(chan->qdest_page);
			ret = -ENOMEM;
			goto err_page_alloc;
		}
		chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
		chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
	if (ref) {
		ref->chan = &chan->common;
		INIT_LIST_HEAD(&ref->node);
		list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
	} else {
		dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
		ret = -ENOMEM;
		goto err_ref_alloc;
	}

	ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
	if (ret)
		goto err_irq;

	ppc440spe_adma_init_capabilities(adev);

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		initcode = PPC_ADMA_INIT_REGISTER;
		dev_err(&ofdev->dev, "failed to register dma device\n");
		goto err_dev_reg;
	}

	goto out;

err_dev_reg:
	ppc440spe_adma_release_irqs(adev, chan);
err_irq:
	list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
		if (chan == to_ppc440spe_adma_chan(ref->chan)) {
			list_del(&ref->node);
			kfree(ref);
		}
	}
err_ref_alloc:
	if (adev->id != PPC440SPE_XOR_ID) {
		dma_unmap_page(&ofdev->dev, chan->pdest,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		dma_unmap_page(&ofdev->dev, chan->qdest,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(chan->pdest_page);
		__free_page(chan->qdest_page);
	}
err_page_alloc:
	kfree(chan);
err_chan_alloc:
	if (adev->id == PPC440SPE_XOR_ID)
		iounmap(adev->xor_reg);
	else
		iounmap(adev->dma_reg);
err_regs_alloc:
	dma_free_coherent(adev->dev, adev->pool_size,
			  adev->dma_desc_pool_virt,
			  adev->dma_desc_pool);
err_dma_alloc:
	kfree(adev);
err_adev_alloc:
	release_mem_region(res.start, resource_size(&res));
out:
	if (id < PPC440SPE_ADMA_ENGINES_NUM)
		ppc440spe_adma_devices[id] = initcode;

	return ret;
}

/**
 * ppc440spe_adma_remove - remove the async device
 */
static int ppc440spe_adma_remove(struct platform_device *ofdev)
{
	struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
	struct device_node *np = ofdev->dev.of_node;
	struct resource res;
	struct dma_chan *chan, *_chan;
	struct ppc_dma_chan_ref *ref, *_ref;
	struct ppc440spe_adma_chan *ppc440spe_chan;

	if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
		ppc440spe_adma_devices[adev->id] = -1;

	dma_async_device_unregister(&adev->common);

	list_for_each_entry_safe(chan, _chan, &adev->common.channels,
				 device_node) {
		ppc440spe_chan = to_ppc440spe_adma_chan(chan);
		ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
		tasklet_kill(&ppc440spe_chan->irq_tasklet);
		if (adev->id != PPC440SPE_XOR_ID) {
			dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
			dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(ppc440spe_chan->pdest_page);
			__free_page(ppc440spe_chan->qdest_page);
		}
		list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
					 node) {
			if (ppc440spe_chan ==
			    to_ppc440spe_adma_chan(ref->chan)) {
				list_del(&ref->node);
				kfree(ref);
			}
		}
		list_del(&chan->device_node);
		kfree(ppc440spe_chan);
	}

	dma_free_coherent(adev->dev, adev->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
	if (adev->id == PPC440SPE_XOR_ID)
		iounmap(adev->xor_reg);
	else
		iounmap(adev->dma_reg);
	of_address_to_resource(np, 0, &res);
	release_mem_region(res.start, resource_size(&res));
	kfree(adev);
	return 0;
}

/*
 * /sys driver interface to enable h/w RAID-6 capabilities
 * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
 * directory are "devices", "enable" and "poly".
 * "devices" shows the available engines.
 * "enable" is used to enable RAID-6 capabilities or to check
 * whether they have been activated.
 * "poly" allows setting/checking the polynomial in use (for PPC440SPe only).
 */
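
/*
 * A usage sketch (the exact sysfs paths depend on the board's device tree,
 * and the key value below is a placeholder, not a real key):
 *
 *	# cat .../driver/devices
 *	# echo 0x<key> > .../driver/enable
 *	# cat .../driver/enable
 *	# echo 0x11d > .../driver/poly
 */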

static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
{
	ssize_t size = 0;
	int i;

	for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
		if (ppc440spe_adma_devices[i] == -1)
			continue;
		size += snprintf(buf + size, PAGE_SIZE - size,
				 "PPC440SP(E)-ADMA.%d: %s\n", i,
				 ppc_adma_errors[ppc440spe_adma_devices[i]]);
	}
	return size;
}

static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE,
			"PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
			ppc440spe_r6_enabled ? "EN" : "DIS");
}

static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
					const char *buf, size_t count)
{
	unsigned long val;

	if (!count || count > 11)
		return -EINVAL;

	if (!ppc440spe_r6_tchan)
		return -EFAULT;

	/* Write a key */
	if (sscanf(buf, "%lx", &val) != 1)
		return -EINVAL;
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
	isync();

	/* Verify whether it really works now */
	if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
		pr_info("PPC440SP(e) RAID-6 has been activated "
			"successfully\n");
		ppc440spe_r6_enabled = 1;
	} else {
		pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
			" Wrong key?\n");
		ppc440spe_r6_enabled = 0;
	}
	return count;
}

static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
{
	ssize_t size = 0;
	u32 reg;

#ifdef CONFIG_440SP
	/* 440SP has a fixed polynomial */
	reg = 0x4d;
#else
	reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
	reg >>= MQ0_CFBHL_POLY;
	reg &= 0xFF;
#endif

	size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
			"uses the 0x1%02x polynomial.\n", reg);
	return size;
}

static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
				      const char *buf, size_t count)
{
	unsigned long reg, val;

#ifdef CONFIG_440SP
	/* 440SP uses the default 0x14D polynomial only */
	return -EINVAL;
#endif

	if (!count || count > 6)
		return -EINVAL;

	/* e.g., 0x14D or 0x11D */
	if (sscanf(buf, "%lx", &val) != 1)
		return -EINVAL;

	if (val & ~0x1FF)
		return -EINVAL;

	val &= 0xFF;
	reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
	reg &= ~(0xFF << MQ0_CFBHL_POLY);
	reg |= val << MQ0_CFBHL_POLY;
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);

	return count;
}

static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
		   store_ppc440spe_r6enable);
static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
		   store_ppc440spe_r6poly);

/*
 * Common initialisation for RAID engines; allocate memory for
 * DMAx FIFOs, perform configuration common to all DMA engines.
 * Further DMA engine specific configuration is done at probe time.
 */
static int ppc440spe_configure_raid_devices(void)
{
	struct device_node *np;
	struct resource i2o_res;
	struct i2o_regs __iomem *i2o_reg;
	dcr_host_t i2o_dcr_host;
	unsigned int dcr_base, dcr_len;
	int i, ret;

	np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
	if (!np) {
		pr_err("%s: can't find I2O device tree node\n",
			__func__);
		return -ENODEV;
	}

	if (of_address_to_resource(np, 0, &i2o_res)) {
		of_node_put(np);
		return -EINVAL;
	}

	i2o_reg = of_iomap(np, 0);
	if (!i2o_reg) {
		pr_err("%s: failed to map I2O registers\n", __func__);
		of_node_put(np);
		return -EINVAL;
	}

	/* Get I2O DCRs base */
	dcr_base = dcr_resource_start(np, 0);
	dcr_len = dcr_resource_len(np, 0);
	if (!dcr_base && !dcr_len) {
		pr_err("%s: can't get DCR registers base/len!\n",
			np->full_name);
		of_node_put(np);
		iounmap(i2o_reg);
		return -ENODEV;
	}

	i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
	if (!DCR_MAP_OK(i2o_dcr_host)) {
		pr_err("%s: failed to map DCRs!\n", np->full_name);
		of_node_put(np);
		iounmap(i2o_reg);
		return -ENODEV;
	}
	of_node_put(np);

	/* Provide memory regions for the DMA engines' FIFOs: I2O, DMA0 and
	 * DMA1 share the base address of the FIFO memory space.
	 * We actually need twice as much physical memory as programmed in
	 * the <fsiz> register (because there are two FIFOs for each DMA:
	 * CP and CS).
	 */
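	/* Hence the (DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1 allocation
	 * below: for example (sizes illustrative), two 8 KB FIFO
	 * programmings need (8192 + 8192) * 2 = 32 KB of buffer space.
	 */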
	ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
					 GFP_KERNEL);
	if (!ppc440spe_dma_fifo_buf) {
		pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
		iounmap(i2o_reg);
		dcr_unmap(i2o_dcr_host, dcr_len);
		return -ENOMEM;
	}

	/*
	 * Configure h/w
	 */
	/* Reset I2O/DMA */
	mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
	mtdcri(SDR0, DCRN_SDR0_SRST, 0);

	/* Setup the base address of mmaped registers */
	dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
	dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
		  I2O_REG_ENABLE);
	dcr_unmap(i2o_dcr_host, dcr_len);

	/* Setup FIFO memory space base address */
	iowrite32(0, &i2o_reg->ifbah);
	iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);

	/* Set zero FIFO size for I2O, so the whole
	 * ppc440spe_dma_fifo_buf is used by the DMAs.
	 * DMAx FIFOs will be configured during probe.
	 */
	iowrite32(0, &i2o_reg->ifsiz);
	iounmap(i2o_reg);

	/* To prepare WXOR/RXOR functionality we need access to the
	 * Memory Queue Module DCRs (it will finally be enabled
	 * via the /sys interface of the ppc440spe ADMA driver).
	 */
	np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
	if (!np) {
		pr_err("%s: can't find MQ device tree node\n",
			__func__);
		ret = -ENODEV;
		goto out_free;
	}

	/* Get MQ DCRs base */
	dcr_base = dcr_resource_start(np, 0);
	dcr_len = dcr_resource_len(np, 0);
	if (!dcr_base && !dcr_len) {
		pr_err("%s: can't get DCR registers base/len!\n",
			np->full_name);
		ret = -ENODEV;
		goto out_mq;
	}

	ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
	if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
		pr_err("%s: failed to map DCRs!\n", np->full_name);
		ret = -ENODEV;
		goto out_mq;
	}
	of_node_put(np);
	ppc440spe_mq_dcr_len = dcr_len;

	/* Set HB alias */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);

	/* Set:
	 * - LL transaction passing limit to 1;
	 * - Memory controller cycle limit to 1;
	 * - Galois Polynomial to 0x14d (default)
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
		  (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
		  (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));

	atomic_set(&ppc440spe_adma_err_irq_ref, 0);
	for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
		ppc440spe_adma_devices[i] = -1;

	return 0;

out_mq:
	of_node_put(np);
out_free:
	kfree(ppc440spe_dma_fifo_buf);
	return ret;
}

static const struct of_device_id ppc440spe_adma_of_match[] = {
	{ .compatible	= "ibm,dma-440spe", },
	{ .compatible	= "amcc,xor-accelerator", },
	{},
};
MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
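
/*
 * For reference, a matching device tree node looks roughly like the sketch
 * below (the unit address, reg and interrupts values are board-specific
 * placeholders, not taken from a real board file):
 *
 *	dma0: dma@400100100 {
 *		compatible = "ibm,dma-440spe";
 *		cell-index = <0>;
 *		reg = <...>;
 *		interrupts = <...>;
 *	};
 */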

static struct platform_driver ppc440spe_adma_driver = {
	.probe = ppc440spe_adma_probe,
	.remove = ppc440spe_adma_remove,
	.driver = {
		.name = "PPC440SP(E)-ADMA",
		.of_match_table = ppc440spe_adma_of_match,
	},
};

static __init int ppc440spe_adma_init(void)
{
	int ret;

	ret = ppc440spe_configure_raid_devices();
	if (ret)
		return ret;

	ret = platform_driver_register(&ppc440spe_adma_driver);
	if (ret) {
		pr_err("%s: failed to register platform driver\n",
			__func__);
		goto out_reg;
	}

	/* Initialization status */
	ret = driver_create_file(&ppc440spe_adma_driver.driver,
				 &driver_attr_devices);
	if (ret)
		goto out_dev;

	/* RAID-6 h/w enable entry */
	ret = driver_create_file(&ppc440spe_adma_driver.driver,
				 &driver_attr_enable);
	if (ret)
		goto out_en;

	/* GF polynomial to use */
	ret = driver_create_file(&ppc440spe_adma_driver.driver,
				 &driver_attr_poly);
	if (!ret)
		return ret;

	driver_remove_file(&ppc440spe_adma_driver.driver,
			   &driver_attr_enable);
out_en:
	driver_remove_file(&ppc440spe_adma_driver.driver,
			   &driver_attr_devices);
out_dev:
	/* User will not be able to enable h/w RAID-6 */
	pr_err("%s: failed to create RAID-6 driver interface\n",
		__func__);
	platform_driver_unregister(&ppc440spe_adma_driver);
out_reg:
	dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
	kfree(ppc440spe_dma_fifo_buf);
	return ret;
}

static void __exit ppc440spe_adma_exit(void)
{
	driver_remove_file(&ppc440spe_adma_driver.driver,
			   &driver_attr_poly);
	driver_remove_file(&ppc440spe_adma_driver.driver,
			   &driver_attr_enable);
	driver_remove_file(&ppc440spe_adma_driver.driver,
			   &driver_attr_devices);
	platform_driver_unregister(&ppc440spe_adma_driver);
	dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
	kfree(ppc440spe_dma_fifo_buf);
}

arch_initcall(ppc440spe_adma_init);
module_exit(ppc440spe_adma_exit);

MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
MODULE_LICENSE("GPL");