/*
 * Copyright (C) 2006-2009 DENX Software Engineering.
 *
 * Author: Yuri Tikhonov <yur@emcraft.com>
 *
 * Further porting to arch/powerpc by
 *	Anatolij Gustschin <agust@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the AMCC PPC440SPe Processors.
 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 * ADMA driver written by D. Williams.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
#include "../dmaengine.h"

enum ppc_adma_init_code {
	PPC_ADMA_INIT_OK = 0,
	PPC_ADMA_INIT_MEMRES,
	PPC_ADMA_INIT_MEMREG,
	PPC_ADMA_INIT_ALLOC,
	PPC_ADMA_INIT_COHERENT,
	PPC_ADMA_INIT_CHANNEL,
	PPC_ADMA_INIT_IRQ1,
	PPC_ADMA_INIT_IRQ2,
	PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
	[PPC_ADMA_INIT_OK] = "ok",
	[PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
	[PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
	[PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
				"structure",
	[PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
				   "hardware descriptors",
	[PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
	[PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
	[PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
	[PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};

static enum ppc_adma_init_code
ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
};

/* The list of channels exported by ppc440spe ADMA */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* This flag is set when we want to refetch the XOR chain in the
 * interrupt handler
 */
static u32 do_xor_refetch;

/* Pointer to DMA0, DMA1 CP/CS FIFO */
static void *ppc440spe_dma_fifo_buf;

/* Pointers to last submitted to DMA0, DMA1 CDBs */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Pointers to the last linked and submitted xor CBs */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* This array is used in data-check operations for storing a pattern */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
 * the block size in transactions, we do not allow more than one RXOR
 * transaction to be active at a time. This variable tracks whether RXOR is
 * currently active (the PPC440SPE_RXOR_RUN bit is set) or not (it is clear).
 */
static unsigned long ppc440spe_rxor_state;

/* These are used in enable & check routines
 */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_rxor *cursor, int index,
		int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
		struct ppc440spe_adma_desc_slot *desc,
		int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
		struct ppc440spe_adma_desc_slot *desc,
		int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif

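/* Note on ADMA_LL_DBG(): with ADMA_LL_DEBUG defined the wrapped statement
 * is executed; otherwise the 'if (0)' form compiles it away while still
 * type-checking its arguments. Typical use later in this file:
 * ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
 */
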
static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
	struct dma_cdb *cdb;
	struct xor_cb *cb;
	int i;

	switch (chan->device->id) {
	case 0:
	case 1:
		cdb = block;

		pr_debug("CDB at %p [%d]:\n"
			 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
			 "\t sg1u 0x%08x sg1l 0x%08x\n"
			 "\t sg2u 0x%08x sg2l 0x%08x\n"
			 "\t sg3u 0x%08x sg3l 0x%08x\n",
			 cdb, chan->device->id,
			 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
			 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
			 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
			 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
		);
		break;
	case 2:
		cb = block;

		pr_debug("CB at %p [%d]:\n"
			 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
			 "\t cbtah 0x%08x cbtal 0x%08x\n"
			 "\t cblah 0x%08x cblal 0x%08x\n",
			 cb, chan->device->id,
			 cb->cbc, cb->cbbc, cb->cbs,
			 cb->cbtah, cb->cbtal,
			 cb->cblah, cb->cblal);
		for (i = 0; i < 16; i++) {
			if (i && !cb->ops[i].h && !cb->ops[i].l)
				continue;
			pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
				 i, cb->ops[i].h, cb->ops[i].l);
		}
		break;
	}
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
			  struct ppc440spe_adma_desc_slot *iter)
{
	for (; iter; iter = iter->hw_next)
		print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
			     unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
			    unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
				    unsigned int src_cnt,
				    const unsigned char *scf)
{
	int i;

	pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
	if (scf) {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
	} else {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(no) ", src[i]);
	}

	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/
/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
 * pseudo operation
 */
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	struct xor_cb *p;

	switch (chan->device->id) {
	case PPC440SPE_XOR_ID:
		p = desc->hw_desc;
		memset(desc->hw_desc, 0, sizeof(struct xor_cb));
		/* NOP with Command Block Complete Enable */
		p->cbc = XOR_CBCR_CBCE_BIT;
		break;
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
		/* NOP with interrupt */
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
		break;
	default:
		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
		       __func__);
		break;
	}
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
 * pseudo operation
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = 0;
	desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
				    int src_cnt, unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = 1;

	hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
 * operation in DMA2 controller
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
				       int dst_cnt, int src_cnt,
				       unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
	memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
	desc->descs_per_op = 0;

	hw_desc->cbc = XOR_CBCR_TGT_BIT;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)

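/* DMA_PREP_ZERO_P/Q are driver-private prep flags. They are allocated
 * directly above the last generic dma_ctrl_flags bit (DMA_PREP_FENCE,
 * aliased as DMA_CTRL_FLAGS_LAST above), so they can travel in the same
 * 'flags' word as the core dmaengine flags without colliding with them.
 */
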
/**
 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
 * with DMA0/1
 */
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
					int dst_cnt, int src_cnt,
					unsigned long flags, unsigned long op)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	u8 dopc;

	/* Common initialization of a PQ descriptors chain */
	set_bits(op, &desc->flags);
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;

	/* WXOR MULTICAST if both P and Q are being computed
	 * MV_SG1_SG2 if Q only
	 */
	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

	list_for_each_entry(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

		if (likely(!list_is_last(&iter->chain_node,
				&desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
				struct ppc440spe_adma_desc_slot, chain_node);
			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		} else {
			/* This is the last descriptor.
			 * This slot will be pasted from the ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			if (flags & DMA_PREP_INTERRUPT)
				set_bit(PPC440SPE_DESC_INT, &iter->flags);
			else
				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}

	/* Set OPS depending on WXOR/RXOR type of operation */
	if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
		/* This is a WXOR only chain:
		 * - the first descriptors are for zeroing destinations
		 *   if PPC440SPE_ZERO_P/Q is set;
		 * - the remaining descriptors are for GF-XOR operations.
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);

		if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = dopc;
		}
	} else {
		/* This is either RXOR-only or mixed RXOR/WXOR */

		/* The first 1 or 2 slots in chain are always RXOR,
		 * if need to calculate P & Q, then there are two
		 * RXOR slots; if only P or only Q, then there is one
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		hw_desc = iter->hw_desc;
		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
		}

		/* The remaining descs (if any) are WXORs */
		if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
			list_for_each_entry_from(iter, &desc->group_list,
						 chain_node) {
				hw_desc = iter->hw_desc;
				hw_desc->opc = dopc;
			}
		}
	}
}

/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
 * for PQ_ZERO_SUM operation
 */
static void ppc440spe_desc_init_dma01pqzero_sum(
				struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	int i = 0;
	u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
				   DMA_CDB_OPC_MV_SG1_SG2;
	/*
	 * Initialize starting from the 2nd or 3rd descriptor, depending
	 * on dst_cnt. The first one or two slots are for cloning P
	 * and/or Q to chan->pdest and/or chan->qdest as we have
	 * to preserve the original P/Q.
	 */
	iter = list_first_entry(&desc->group_list,
				struct ppc440spe_adma_desc_slot, chain_node);
	iter = list_entry(iter->chain_node.next,
			  struct ppc440spe_adma_desc_slot, chain_node);

	if (dst_cnt > 1) {
		iter = list_entry(iter->chain_node.next,
				  struct ppc440spe_adma_desc_slot, chain_node);
	}
	/* initialize each source descriptor in chain */
	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
		iter->src_cnt = 0;
		iter->dst_cnt = 0;

		/* This is a ZERO_SUM operation:
		 * - <src_cnt> descriptors starting from the 2nd or 3rd
		 *   descriptor are for GF-XOR operations;
		 * - the remaining <dst_cnt> descriptors are for checking
		 *   the result
		 */
		if (i++ < src_cnt)
			/* MV_SG1_SG2 if only Q is being verified
			 * MULTICAST if both P and Q are being verified
			 */
			hw_desc->opc = dopc;
		else
			/* DMA_CDB_OPC_DCHECK128 operation */
			hw_desc->opc = DMA_CDB_OPC_DCHECK128;

		if (likely(!list_is_last(&iter->chain_node,
					 &desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
						struct ppc440spe_adma_desc_slot,
						chain_node);
		} else {
			/* This is the last descriptor.
			 * This slot will be pasted from the ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			/* always enable interrupt generation since we get
			 * the status of pqzero from the handler
			 */
			set_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
}

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
				       unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
 */
static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
				       int value, unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
	hw_desc->opc = DMA_CDB_OPC_DFILL128;
}

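/* Note: judging by the descriptor accessors below (a DFILL128 CDB reports
 * zero memory sources), the DFILL128 operation takes its 128-bit fill
 * pattern from the CDB scatter/gather words themselves;
 * ppc440spe_desc_init_memset() therefore replicates the 32-bit 'value'
 * into the sg1 and sg3 words, while sg2 is later set up as the
 * destination address.
 */
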
/**
 * ppc440spe_desc_set_src_addr - set source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int src_idx, dma_addr_t addrh,
					dma_addr_t addrl)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmplow, tmphi;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
		dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->ops[src_idx].l = addrl;
		xor_hw_desc->ops[src_idx].h |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					u32 mult_index, int sg_index,
					unsigned char mult_value)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	u32 *psgu;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (sg_index) {
		/* for RXOR operations set multiplier
		 * into source cued address
		 */
		case DMA_CDB_SG_SRC:
			psgu = &dma_hw_desc->sg1u;
			break;
		/* for WXOR operations set multiplier
		 * into destination cued address(es)
		 */
		case DMA_CDB_SG_DST1:
			psgu = &dma_hw_desc->sg2u;
			break;
		case DMA_CDB_SG_DST2:
			psgu = &dma_hw_desc->sg3u;
			break;
		default:
			BUG();
		}

		*psgu |= cpu_to_le32(mult_value << mult_index);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					 struct ppc440spe_adma_chan *chan,
					 dma_addr_t addrh, dma_addr_t addrl,
					 u32 dst_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmphi, tmplow;
	u32 *psgu, *psgl;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;

		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

		*psgl = cpu_to_le32((u32)tmplow);
		*psgu |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbtal = addrl;
		xor_hw_desc->cbtah |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_byte_count - set number of data bytes involved
 * into the operation
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan,
					  u32 byte_count)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->cnt = cpu_to_le32(byte_count);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbbc = byte_count;
		break;
	}
}

/**
 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
 */
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
	/* assume that byte_count is aligned on the 512-boundary;
	 * thus write it directly to the register (bits 23:31 are
	 * reserved there).
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}

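/* Example: a 4 KB RXOR block size writes 0x1000 to MQ0_CF2H as-is. Bits
 * 23:31 (PPC numbering, i.e. the nine least significant bits) are reserved,
 * and the required 512-byte alignment of byte_count guarantees those bits
 * are zero, so no masking or shifting is needed before the DCR write.
 */
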
/**
 * ppc440spe_desc_set_dcheck - set CHECK pattern
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan,
				      u8 *qword)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		iowrite32(qword[0], &dma_hw_desc->sg3l);
		iowrite32(qword[4], &dma_hw_desc->sg3u);
		iowrite32(qword[8], &dma_hw_desc->sg2l);
		iowrite32(qword[12], &dma_hw_desc->sg2u);
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_xor_set_link - set link address in xor CB
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
				   struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

	if (unlikely(!next_desc || !(next_desc->phys))) {
		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
		       __func__, next_desc,
		       next_desc ? next_desc->phys : 0);
		BUG();
	}

	xor_hw_desc->cbs = 0;
	xor_hw_desc->cblal = next_desc->phys;
	xor_hw_desc->cblah = 0;
	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

/**
 * ppc440spe_desc_set_link - set the address of descriptor following this
 * descriptor in chain
 */
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
				    struct ppc440spe_adma_desc_slot *prev_desc,
				    struct ppc440spe_adma_desc_slot *next_desc)
{
	unsigned long flags;
	struct ppc440spe_adma_desc_slot *tail = next_desc;

	if (unlikely(!prev_desc || !next_desc ||
	    (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
		/* If previous next is overwritten something is wrong.
		 * though we may refetch from append to initiate list
		 * processing; in this case - it's ok.
		 */
		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
		       "prev->hw_next=0x%p\n", __func__, prev_desc,
		       next_desc, prev_desc ? prev_desc->hw_next : 0);
		BUG();
	}

	local_irq_save(flags);

	/* do s/w chaining both for DMA and XOR descriptors */
	prev_desc->hw_next = next_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		break;
	case PPC440SPE_XOR_ID:
		/* bind descriptor to the chain */
		while (tail->hw_next)
			tail = tail->hw_next;
		xor_last_linked = tail;

		if (prev_desc == xor_last_submit)
			/* do not link to the last submitted CB */
			break;
		ppc440spe_xor_set_link(prev_desc, next_desc);
		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
 */
static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
				       struct ppc440spe_adma_chan *chan,
				       int src_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		/* May have 0, 1, 2, or 3 sources */
		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DFILL128:
			return 0;
		case DMA_CDB_OPC_DCHECK128:
			if (unlikely(src_idx)) {
				printk(KERN_ERR "%s: try to get %d source for"
				       " DCHECK128\n", __func__, src_idx);
				BUG();
			}
			return le32_to_cpu(dma_hw_desc->sg1l);
		case DMA_CDB_OPC_MULTICAST:
		case DMA_CDB_OPC_MV_SG1_SG2:
			if (unlikely(src_idx > 2)) {
				printk(KERN_ERR "%s: try to get %d source from"
				       " DMA descr\n", __func__, src_idx);
				BUG();
			}
			if (src_idx) {
				if (le32_to_cpu(dma_hw_desc->sg1u) &
				    DMA_CUED_XOR_WIN_MSK) {
					u8 region;

					if (src_idx == 1)
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							desc->unmap_len;

					region = (le32_to_cpu(
					    dma_hw_desc->sg1u)) >>
						DMA_CUED_REGION_OFF;

					region &= DMA_CUED_REGION_MSK;
					switch (region) {
					case DMA_RXOR123:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len << 1);
					case DMA_RXOR124:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len * 3);
					case DMA_RXOR125:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len << 2);
					default:
						printk(KERN_ERR
						    "%s: try to"
						    " get src3 for region %02x"
						    "PPC440SPE_DESC_RXOR12?\n",
						    __func__, region);
						BUG();
					}
				} else {
					printk(KERN_ERR
					    "%s: try to get %d"
					    " source for non-cued descr\n",
					    __func__, src_idx);
					BUG();
				}
			}
			return le32_to_cpu(dma_hw_desc->sg1l);
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
			       __func__, dma_hw_desc->opc);
			BUG();
		}
		return le32_to_cpu(dma_hw_desc->sg1l);
	case PPC440SPE_XOR_ID:
		/* May have up to 16 sources */
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->ops[src_idx].l;
	}
	return 0;
}

/**
 * ppc440spe_desc_get_dest_addr - extract the destination address from the
 * descriptor
 */
static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		if (likely(!idx))
			return le32_to_cpu(dma_hw_desc->sg2l);
		return le32_to_cpu(dma_hw_desc->sg3l);
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->cbtal;
	}
	return 0;
}

/**
 * ppc440spe_desc_get_src_num - extract the number of source addresses from
 * the descriptor
 */
static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DFILL128:
			return 0;
		case DMA_CDB_OPC_DCHECK128:
			return 1;
		case DMA_CDB_OPC_MV_SG1_SG2:
		case DMA_CDB_OPC_MULTICAST:
			/*
			 * Only for RXOR operations we have more than
			 * one source
			 */
			if (le32_to_cpu(dma_hw_desc->sg1u) &
			    DMA_CUED_XOR_WIN_MSK) {
				/* RXOR op, there are 2 or 3 sources */
				if (((le32_to_cpu(dma_hw_desc->sg1u) >>
				    DMA_CUED_REGION_OFF) &
				    DMA_CUED_REGION_MSK) == DMA_RXOR12) {
					/* RXOR 1-2 */
					return 2;
				} else {
					/* RXOR 1-2-3/1-2-4/1-2-5 */
					return 3;
				}
			}
			return 1;
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
			       __func__, dma_hw_desc->opc);
			BUG();
		}
	case PPC440SPE_XOR_ID:
		/* up to 16 sources */
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
	default:
		BUG();
	}
	return 0;
}

/**
 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
 * this descriptor
 */
static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* May be 1 or 2 destinations */
		dma_hw_desc = desc->hw_desc;
		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DCHECK128:
			return 0;
		case DMA_CDB_OPC_MV_SG1_SG2:
		case DMA_CDB_OPC_DFILL128:
			return 1;
		case DMA_CDB_OPC_MULTICAST:
			if (desc->dst_cnt == 2)
				return 2;
			else
				return 1;
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
			       __func__, dma_hw_desc->opc);
			BUG();
		}
	case PPC440SPE_XOR_ID:
		/* Always only 1 destination */
		return 1;
	default:
		BUG();
	}
	return 0;
}

/**
 * ppc440spe_desc_get_link - get the address of the descriptor that
 * follows this one
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	if (!desc->hw_next)
		return 0;

	return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check alignment
 */
static inline int ppc440spe_desc_is_aligned(
	struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/**
 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
 * XOR operation
 */
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
					 int *slots_per_op)
{
	int slot_cnt;

	/* each XOR descriptor provides up to 16 source operands */
	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

	if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
		return slot_cnt;

	printk(KERN_ERR "%s: len %zu > max %d !!\n",
	       __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
	BUG();
	return slot_cnt;
}

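/* Example: a 20-source XOR needs DIV_ROUND_UP(20, 16) = 2 CDB slots;
 * slots_per_op is set to the same value so that the slot allocator treats
 * the whole group as a single operation.
 */
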
/**
 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
 * DMA2 PQ operation
 */
static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
					int src_cnt, size_t len)
{
	signed long long order = 0;
	int state = 0;
	int addr_count = 0;
	int i;

	for (i = 1; i < src_cnt; i++) {
		dma_addr_t cur_addr = srcs[i];
		dma_addr_t old_addr = srcs[i-1];
		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				state = 3;
			}
			break;
		case 1:
			if (i == src_cnt-2 || (order == -1
				&& cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
				addr_count++;
			} else if (cur_addr == old_addr + len*order) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 2*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 3*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				order = 0;
				state = 0;
				addr_count++;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			addr_count++;
			break;
		}
		if (state == 3)
			break;
	}
	if (src_cnt <= 1 || (state != 1 && state != 2)) {
		pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
		       __func__, src_cnt, state, addr_count, order);
		for (i = 0; i < src_cnt; i++)
			pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
		BUG();
	}

	return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
}

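/* Worked example for the address-pattern scan above: three sources laid
 * out back-to-back at B, B + len and B + 2*len walk through states
 * 0 -> 1 -> 2 and collapse into one RXOR region (addr_count == 1), i.e.
 * DIV_ROUND_UP(1, XOR_MAX_OPS) == 1 descriptor slot. Sources with no
 * neighbouring addresses hit state 3 and the BUG() path, since DMA2 can
 * only be used for RXOR-able source layouts.
 */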

/******************************************************************************
 * ADMA channel low-level routines
 ******************************************************************************/

static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);

/**
 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
 */
static void ppc440spe_adma_device_clear_eot_status(
					struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	u8 *p = chan->device->dma_desc_pool_virt;
	struct dma_cdb *cdb;
	u32 rv, i;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* read FIFO to ack */
		dma_reg = chan->device->dma_reg;
		while ((rv = ioread32(&dma_reg->csfpl))) {
			i = rv & DMA_CDB_ADDR_MSK;
			cdb = (struct dma_cdb *)&p[i -
			    (u32)chan->device->dma_desc_pool];

			/* Clear opcode to ack. This is necessary for
			 * ZeroSum operations only
			 */
			cdb->opc = 0;

			if (test_bit(PPC440SPE_RXOR_RUN,
			    &ppc440spe_rxor_state)) {
				/* probably this is a completed RXOR op,
				 * get pointer to CDB using the fact that
				 * physical and virtual addresses of CDB
				 * in pools have the same offsets
				 */
				if (le32_to_cpu(cdb->sg1u) &
				    DMA_CUED_XOR_BASE) {
					/* this is a RXOR */
					clear_bit(PPC440SPE_RXOR_RUN,
						  &ppc440spe_rxor_state);
				}
			}

			if (rv & DMA_CDB_STATUS_MSK) {
				/* ZeroSum check failed
				 */
				struct ppc440spe_adma_desc_slot *iter;
				dma_addr_t phys = rv & ~DMA_CDB_MSK;

				/*
				 * Update the status of corresponding
				 * descriptor.
				 */
				list_for_each_entry(iter, &chan->chain,
				    chain_node) {
					if (iter->phys == phys)
						break;
				}
				/*
				 * if cannot find the corresponding
				 * slot it's a bug
				 */
				BUG_ON(&iter->chain_node == &chan->chain);

				if (iter->xor_check_result) {
					if (test_bit(PPC440SPE_DESC_PCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_P_RESULT;
					} else
					if (test_bit(PPC440SPE_DESC_QCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_Q_RESULT;
					} else
						BUG();
				}
			}
		}

		rv = ioread32(&dma_reg->dsts);
		if (rv) {
			pr_err("DMA%d err status: 0x%x\n",
			       chan->device->id, rv);
			/* write back to clear */
			iowrite32(rv, &dma_reg->dsts);
		}
		break;
	case PPC440SPE_XOR_ID:
		/* reset status bits to ack */
		xor_reg = chan->device->xor_reg;
		rv = ioread32be(&xor_reg->sr);
		iowrite32be(rv, &xor_reg->sr);

		if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
			if (rv & XOR_IE_RPTIE_BIT) {
				/* Read PLB Timeout Error.
				 * Try to resubmit the CB
				 */
				u32 val = ioread32be(&xor_reg->ccbalr);

				iowrite32be(val, &xor_reg->cblalr);

				val = ioread32be(&xor_reg->crsr);
				iowrite32be(val | XOR_CRSR_XAE_BIT,
					    &xor_reg->crsr);
			} else
				pr_err("XOR ERR 0x%x status\n", rv);
			break;
		}

		/* if the XORcore is idle, but there are unprocessed CBs
		 * then refetch the s/w chain here
		 */
		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
		    do_xor_refetch)
			ppc440spe_chan_append(chan);
		break;
	}
}

/**
 * ppc440spe_chan_is_busy - get the channel status
 */
static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	int busy = 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		/* if command FIFO's head and tail pointers are equal and
		 * status tail is the same as command, then channel is free
		 */
		if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
		    ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
			busy = 1;
		break;
	case PPC440SPE_XOR_ID:
		/* use the special status bit for the XORcore
		 */
		xor_reg = chan->device->xor_reg;
		busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
		break;
	}

	return busy;
}

/**
 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
 */
static void ppc440spe_chan_set_first_xor_descriptor(
				struct ppc440spe_adma_chan *chan,
				struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_regs *xor_reg = chan->device->xor_reg;

	if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
		printk(KERN_INFO "%s: Warn: XORcore is running "
		       "while trying to set the first CDB!\n",
		       __func__);

	xor_last_submit = xor_last_linked = next_desc;

	iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);

	iowrite32be(next_desc->phys, &xor_reg->cblalr);
	iowrite32be(0, &xor_reg->cblahr);
	iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
		    &xor_reg->cbcr);

	chan->hw_chain_inited = 1;
}

/**
 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
 * called with irqs disabled
 */
static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
				   struct ppc440spe_adma_desc_slot *desc)
{
	u32 pcdb;
	struct dma_regs *dma_reg = chan->device->dma_reg;

	pcdb = desc->phys;
	if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
		pcdb |= DMA_CDB_NO_INT;

	chan_last_sub[chan->device->id] = desc;

	ADMA_LL_DBG(print_cb(chan, desc->hw_desc));

	iowrite32(pcdb, &dma_reg->cpfpl);
}

/**
 * ppc440spe_chan_append - update the h/w chain in the channel
 */
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;
	struct ppc440spe_adma_desc_slot *iter;
	struct xor_cb *xcb;
	u32 cur_desc;
	unsigned long flags;

	local_irq_save(flags);

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		cur_desc = ppc440spe_chan_get_current_descriptor(chan);

		if (likely(cur_desc)) {
			iter = chan_last_sub[chan->device->id];
			BUG_ON(!iter);
		} else {
			/* first peer */
			iter = chan_first_cdb[chan->device->id];
			BUG_ON(!iter);
			ppc440spe_dma_put_desc(chan, iter);
			chan->hw_chain_inited = 1;
		}

		/* is there something new to append */
		if (!iter->hw_next)
			break;

		/* flush descriptors from the s/w queue to fifo */
		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
			ppc440spe_dma_put_desc(chan, iter);
			if (!iter->hw_next)
				break;
		}
		break;
	case PPC440SPE_XOR_ID:
		/* update h/w links and refetch */
		if (!xor_last_submit->hw_next)
			break;

		xor_reg = chan->device->xor_reg;
		/* the last linked CDB has to generate an interrupt
		 * that we'd be able to append the next lists to h/w
		 * regardless of the XOR engine state at the moment of
		 * appending of these next lists
		 */
		xcb = xor_last_linked->hw_desc;
		xcb->cbc |= XOR_CBCR_CBCE_BIT;

		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
			/* XORcore is idle. Refetch now */
			do_xor_refetch = 0;
			ppc440spe_xor_set_link(xor_last_submit,
				xor_last_submit->hw_next);

			ADMA_LL_DBG(print_cb_list(chan,
				xor_last_submit->hw_next));

			xor_last_submit = xor_last_linked;
			iowrite32be(ioread32be(&xor_reg->crsr) |
				    XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
				    &xor_reg->crsr);
		} else {
			/* XORcore is running. Refetch later in the handler */
			do_xor_refetch = 1;
		}

		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
 */
static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;

	if (unlikely(!chan->hw_chain_inited))
		/* h/w descriptor chain is not initialized yet */
		return 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
	case PPC440SPE_XOR_ID:
		xor_reg = chan->device->xor_reg;
		return ioread32be(&xor_reg->ccbalr);
	}
	return 0;
}

/**
 * ppc440spe_chan_run - enable the channel
 */
static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* DMAs are always enabled, do nothing */
		break;
	case PPC440SPE_XOR_ID:
		/* drain write buffer */
		xor_reg = chan->device->xor_reg;

		/* fetch descriptor pointed to in <link> */
		iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
			    &xor_reg->crsr);
		break;
	}
}

/******************************************************************************
 * ADMA device level
 ******************************************************************************/

static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);

static dma_cookie_t
ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
				    dma_addr_t addr, int index);
static void
ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
				  dma_addr_t addr, int index);

static void
ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
			   dma_addr_t *paddr, unsigned long flags);
static void
ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
			  dma_addr_t addr, int index);
static void
ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
			       unsigned char mult, int index, int dst_pos);
static void
ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
				   dma_addr_t paddr, dma_addr_t qaddr);

static struct page *ppc440spe_rxor_srcs[32];

/**
 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
 */
static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
{
	int i, order = 0, state = 0;
	int idx = 0;

	if (unlikely(!(src_cnt > 1)))
		return 0;

	BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));

	/* Skip holes in the source list before checking */
	for (i = 0; i < src_cnt; i++) {
		if (!srcs[i])
			continue;
		ppc440spe_rxor_srcs[idx++] = srcs[i];
	}
	src_cnt = idx;

	for (i = 1; i < src_cnt; i++) {
		char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
		char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);

		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
			} else
				goto out;
			break;
		case 1:
			if ((i == src_cnt - 2) ||
			    (order == -1 && cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
			} else if ((cur_addr == old_addr + len * order) ||
				   (cur_addr == old_addr + 2 * len) ||
				   (cur_addr == old_addr + 3 * len)) {
				state = 2;
			} else {
				order = 0;
				state = 0;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			break;
		}
	}

out:
	if (state == 1 || state == 2)
		return 1;

	return 0;
}

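/* Example: sources mapped back-to-back at addr, addr + len and
 * addr + 2*len end the scan above in state 2, so RXOR on the XORcore may
 * be used; NULL entries (holes) are skipped beforehand, while a source
 * that is not adjacent to its predecessor bails out of state 0 with RXOR
 * disallowed.
 */
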
/**
 * ppc440spe_adma_estimate - estimate the efficiency of processing
 *	the operation given on this channel. It's assumed that 'chan' is
 *	capable of processing the 'cap' type of operation.
 * @chan: channel to use
 * @cap: type of transaction
 * @dst_lst: array of destination pointers
 * @dst_cnt: number of destination operands
 * @src_lst: array of source pointers
 * @src_cnt: number of source operands
 * @src_sz: size of each source operand
 */
static int ppc440spe_adma_estimate(struct dma_chan *chan,
	enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
	struct page **src_lst, int src_cnt, size_t src_sz)
{
	int ef = 1;

	if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
		/* If RAID-6 capabilities were not activated don't try
		 * to use them
		 */
		if (unlikely(!ppc440spe_r6_enabled))
			return -1;
	}
	/* In the current implementation of the ppc440spe ADMA driver it
	 * makes sense to pick out only the pq case, because it may be
	 * processed:
	 * (1) either using the Biskup method on DMA2;
	 * (2) or on DMA0/1.
	 * Thus we favour (1) if the sources are suitable; else let it
	 * be processed on one of the DMA0/1 engines.
	 * In the sum_product case, where the destination is also a
	 * source, process it on DMA0/1 only.
	 */
	if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {

		if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
			ef = 0; /* sum_product case, process on DMA0/1 */
		else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
			ef = 3; /* override (DMA0/1 + idle) */
		else
			ef = 0; /* can't process on DMA2 if !rxor */
	}

	/* channel idleness increases the priority */
	if (likely(ef) &&
	    !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
		ef++;

	return ef;
}

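/* Ranking example: for a PQ request with RXOR-able sources an idle DMA2
 * (XORcore) channel scores 3 + 1 = 4 and wins over an idle DMA0/1 channel
 * scoring 1 + 1 = 2. If the sources are not RXOR-able, DMA2 returns 0 and
 * the operation falls back to one of the DMA0/1 engines.
 */
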
struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
	struct page **dst_lst, int dst_cnt, struct page **src_lst,
	int src_cnt, size_t src_sz)
{
	struct dma_chan *best_chan = NULL;
	struct ppc_dma_chan_ref *ref;
	int best_rank = -1;

	if (unlikely(!src_sz))
		return NULL;
	if (src_sz > PAGE_SIZE) {
		/*
		 * Should a user of the API ever pass > PAGE_SIZE requests,
		 * we sort out cases where temporary page-sized buffers
		 * are used.
		 */
		switch (cap) {
		case DMA_PQ:
			if (src_cnt == 1 && dst_lst[1] == src_lst[0])
				return NULL;
			if (src_cnt == 2 && dst_lst[1] == src_lst[1])
				return NULL;
			break;
		case DMA_PQ_VAL:
		case DMA_XOR_VAL:
			return NULL;
		default:
			break;
		}
	}

	list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			int rank;

			rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
					dst_cnt, src_lst, src_cnt, src_sz);
			if (rank > best_rank) {
				best_rank = rank;
				best_chan = ref->chan;
			}
		}
	}

	return best_chan;
}
EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);

/**
 * ppc440spe_get_group_entry - get group entry with index entry_idx
 * @tdesc: the last allocated slot in the group.
 */
static struct ppc440spe_adma_desc_slot *
ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
{
	struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
	int i = 0;

	if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
		printk(KERN_ERR "%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
		       __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
		BUG();
	}

	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
		if (i++ == entry_idx)
			break;
	}
	return iter;
}

/**
 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &ppc440spe_chan->lock while calling this function
 */
static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
				      struct ppc440spe_adma_chan *chan)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct ppc440spe_adma_desc_slot,
				slot_node);
	}
}

static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
				 struct ppc440spe_adma_desc_slot *desc)
{
	u32 src_cnt, dst_cnt;
	dma_addr_t addr;

	/*
	 * get the number of sources & destination
	 * included in this descriptor and unmap
	 * them all
	 */
	src_cnt = ppc440spe_desc_get_src_num(desc, chan);
	dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);

	/* unmap destinations */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		while (dst_cnt--) {
			addr = ppc440spe_desc_get_dest_addr(
				desc, chan, dst_cnt);
			dma_unmap_page(chan->device->dev,
					addr, desc->unmap_len,
					DMA_FROM_DEVICE);
		}
	}

	/* unmap sources */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		while (src_cnt--) {
			addr = ppc440spe_desc_get_src_addr(
				desc, chan, src_cnt);
			dma_unmap_page(chan->device->dev,
					addr, desc->unmap_len,
					DMA_TO_DEVICE);
		}
	}
}

/**
 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
 * upon completion
 */
static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_adma_chan *chan,
		dma_cookie_t cookie)
{
	int i;

	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 *
		 * actually, ppc's dma_unmap_page() functions are empty, so
		 * the following code is just for the sake of completeness
		 */
		if (chan && chan->needs_unmap && desc->group_head &&
		    desc->unmap_len) {
			struct ppc440spe_adma_desc_slot *unmap =
				desc->group_head;
			/* assume 1 slot per op always */
			u32 slot_count = unmap->slot_cnt;

			/* Run through the group list and unmap addresses */
			for (i = 0; i < slot_count; i++) {
				BUG_ON(!unmap);
				ppc440spe_adma_unmap(chan, unmap);
				unmap = unmap->hw_next;
			}
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

/**
 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
 */
static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
				     struct ppc440spe_adma_chan *chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (list_is_last(&desc->chain_node, &chan->chain) ||
	    desc->phys == ppc440spe_chan_get_current_descriptor(chan))
		return 1;

	if (chan->device->id != PPC440SPE_XOR_ID) {
		/* our DMA interrupt handler clears opc field of
		 * each processed descriptor. For all types of
		 * operations except for ZeroSum we do not actually
		 * need ack from the interrupt handler. ZeroSum is a
		 * special case since the result of this operation
		 * is available from the handler only, so if we see
		 * such type of descriptor (which is unprocessed yet)
		 * then leave it in chain.
		 */
		struct dma_cdb *cdb = desc->hw_desc;
		if (cdb->opc == DMA_CDB_OPC_DCHECK128)
			return 1;
	}

	dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
		desc->phys, desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	ppc440spe_adma_free_slots(desc, chan);
	return 0;
}

/**
 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine
 *	which runs through the channel CDBs list until it reaches the
 *	descriptor currently being processed. When the routine determines
 *	that all CDBs of a group are completed, the corresponding callbacks
 *	(if any) are called and the slots are freed.
 */
static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
	struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
	int busy = ppc440spe_chan_is_busy(chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
		chan->device->id, __func__);

	if (!current_desc) {
		/* There were no transactions yet, so
		 * nothing to clean
		 */
		return;
	}

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &chan->chain,
				 chain_node) {
		dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
			"busy: %d this_desc: %#llx next_desc: %#x "
			"cur: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy, iter->phys,
			ppc440spe_desc_get_link(iter, chan), current_desc,
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || ppc440spe_desc_get_link(iter, chan)) {
				/* not all descriptors of the group have
				 * been completed; exit.
				 */
				break;
			}
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			if (!group_start)
				group_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;

			/* clean up the group */
			slot_cnt = group_start->slot_cnt;
			grp_iter = group_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
					&chan->chain, chain_node) {
				cookie = ppc440spe_adma_run_tx_complete_actions(
					grp_iter, chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = ppc440spe_adma_clean_slot(
					grp_iter, chan);
				if (end_of_chain && slot_cnt) {
					/* Should wait for ZeroSum completion */
					if (cookie > 0)
						chan->common.completed_cookie =
							cookie;
					return;
				}

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			group_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
								cookie);

		if (ppc440spe_adma_clean_slot(iter, chan))
			break;
	}

	BUG_ON(!seen_current);

	if (cookie > 0) {
		chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

1970/**
1971 * ppc440spe_adma_tasklet - clean up watch-dog initiator
1972 */
1973static void ppc440spe_adma_tasklet(unsigned long data)
1974{
1975 struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
1976
1977 spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
1978 __ppc440spe_adma_slot_cleanup(chan);
1979 spin_unlock(&chan->lock);
1980}
1981
1982/**
1983 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
1984 */
1985static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1986{
1987 spin_lock_bh(&chan->lock);
1988 __ppc440spe_adma_slot_cleanup(chan);
1989 spin_unlock_bh(&chan->lock);
1990}
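
/*
 * Illustrative sketch (an assumption, not part of this driver): the cleanup
 * above advances chan->common.completed_cookie, which is what a dmaengine
 * client observes when polling a transaction.  A minimal polling loop,
 * assuming "chan" and "cookie" came from an earlier prep/submit:
 */
#if 0
static enum dma_status example_wait_done(struct dma_chan *chan,
					 dma_cookie_t cookie)
{
	enum dma_status status;
	dma_cookie_t last_complete, last_used;

	do {
		status = dma_async_is_tx_complete(chan, cookie,
						  &last_complete, &last_used);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status;	/* DMA_SUCCESS, or DMA_ERROR on failure */
}
#endif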
1991
1992/**
1993 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
1994 */
1995static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
1996 struct ppc440spe_adma_chan *chan, int num_slots,
1997 int slots_per_op)
1998{
1999 struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
2000 struct ppc440spe_adma_desc_slot *alloc_start = NULL;
2001 struct list_head chain = LIST_HEAD_INIT(chain);
2002 int slots_found, retry = 0;
2003
2004
2005 BUG_ON(!num_slots || !slots_per_op);
2006 /* start search from the last allocated descriptor
2007 * if a contiguous allocation cannot be found, start searching
2008 * from the beginning of the list
2009 */
2010retry:
2011 slots_found = 0;
2012 if (retry == 0)
2013 iter = chan->last_used;
2014 else
2015 iter = list_entry(&chan->all_slots,
2016 struct ppc440spe_adma_desc_slot,
2017 slot_node);
2018 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
2019 slot_node) {
2020 prefetch(_iter);
2021 prefetch(&_iter->async_tx);
2022 if (iter->slots_per_op) {
2023 slots_found = 0;
2024 continue;
2025 }
2026
2027 /* start the allocation if the slot is correctly aligned */
2028 if (!slots_found++)
2029 alloc_start = iter;
2030
2031 if (slots_found == num_slots) {
2032 struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
2033 struct ppc440spe_adma_desc_slot *last_used = NULL;
2034
2035 iter = alloc_start;
2036 while (num_slots) {
2037 int i;
2038 /* pre-ack all but the last descriptor */
2039 if (num_slots != slots_per_op)
2040 async_tx_ack(&iter->async_tx);
2041
2042 list_add_tail(&iter->chain_node, &chain);
2043 alloc_tail = iter;
2044 iter->async_tx.cookie = 0;
2045 iter->hw_next = NULL;
2046 iter->flags = 0;
2047 iter->slot_cnt = num_slots;
2048 iter->xor_check_result = NULL;
2049 for (i = 0; i < slots_per_op; i++) {
2050 iter->slots_per_op = slots_per_op - i;
2051 last_used = iter;
2052 iter = list_entry(iter->slot_node.next,
2053 struct ppc440spe_adma_desc_slot,
2054 slot_node);
2055 }
2056 num_slots -= slots_per_op;
2057 }
2058 alloc_tail->group_head = alloc_start;
2059 alloc_tail->async_tx.cookie = -EBUSY;
2060 list_splice(&chain, &alloc_tail->group_list);
2061 chan->last_used = last_used;
2062 return alloc_tail;
2063 }
2064 }
2065 if (!retry++)
2066 goto retry;
2067
2068 /* try to free some slots if the allocation fails */
2069 tasklet_schedule(&chan->irq_tasklet);
2070 return NULL;
2071}
2072
2073/**
2074 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
2075 */
2076static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
2077{
2078 struct ppc440spe_adma_chan *ppc440spe_chan;
2079 struct ppc440spe_adma_desc_slot *slot = NULL;
2080 char *hw_desc;
2081 int i, db_sz;
2082 int init;
2083
2084 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2085 init = ppc440spe_chan->slots_allocated ? 0 : 1;
2086 chan->chan_id = ppc440spe_chan->device->id;
2087
2088 /* Allocate descriptor slots */
2089 i = ppc440spe_chan->slots_allocated;
2090 if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
2091 db_sz = sizeof(struct dma_cdb);
2092 else
2093 db_sz = sizeof(struct xor_cb);
2094
2095 for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
2096 slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
2097 GFP_KERNEL);
2098 if (!slot) {
2099 printk(KERN_INFO "SPE ADMA Channel initialized only"
2100 " %d descriptor slots\n", i--);
2101 break;
2102 }
2103
2104 hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
2105 slot->hw_desc = (void *) &hw_desc[i * db_sz];
2106 dma_async_tx_descriptor_init(&slot->async_tx, chan);
2107 slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
2108 INIT_LIST_HEAD(&slot->chain_node);
2109 INIT_LIST_HEAD(&slot->slot_node);
2110 INIT_LIST_HEAD(&slot->group_list);
2111 slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
2112 slot->idx = i;
2113
2114 spin_lock_bh(&ppc440spe_chan->lock);
2115 ppc440spe_chan->slots_allocated++;
2116 list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
2117 spin_unlock_bh(&ppc440spe_chan->lock);
2118 }
2119
2120 if (i && !ppc440spe_chan->last_used) {
2121 ppc440spe_chan->last_used =
2122 list_entry(ppc440spe_chan->all_slots.next,
2123 struct ppc440spe_adma_desc_slot,
2124 slot_node);
2125 }
2126
2127 dev_dbg(ppc440spe_chan->device->common.dev,
2128 "ppc440spe adma%d: allocated %d descriptor slots\n",
2129 ppc440spe_chan->device->id, i);
2130
2131 /* initialize the channel and the chain with a null operation */
2132 if (init) {
2133 switch (ppc440spe_chan->device->id) {
2134 case PPC440SPE_DMA0_ID:
2135 case PPC440SPE_DMA1_ID:
2136 ppc440spe_chan->hw_chain_inited = 0;
2137 /* Use WXOR for self-testing */
2138 if (!ppc440spe_r6_tchan)
2139 ppc440spe_r6_tchan = ppc440spe_chan;
2140 break;
2141 case PPC440SPE_XOR_ID:
2142 ppc440spe_chan_start_null_xor(ppc440spe_chan);
2143 break;
2144 default:
2145 BUG();
2146 }
2147 ppc440spe_chan->needs_unmap = 1;
2148 }
2149
2150 return (i > 0) ? i : -ENOMEM;
2151}
2152
2153/**
2154 * ppc440spe_desc_assign_cookie - assign a cookie
2155 */
2156static dma_cookie_t ppc440spe_desc_assign_cookie(
2157 struct ppc440spe_adma_chan *chan,
2158 struct ppc440spe_adma_desc_slot *desc)
2159{
2160 dma_cookie_t cookie = chan->common.cookie;
2161
2162 cookie++;
2163 if (cookie < 0)
2164 cookie = 1;
2165 chan->common.cookie = desc->async_tx.cookie = cookie;
2166 return cookie;
2167}
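
/*
 * Worked example (hypothetical helper, same logic as above): dma_cookie_t
 * is a signed 32-bit counter, so after cookie INT_MAX the increment goes
 * negative and the counter restarts at 1; zero and negative values are
 * reserved for "no cookie" and submit errors.
 */
#if 0
static dma_cookie_t example_next_cookie(dma_cookie_t cookie)
{
	if (++cookie < 0)	/* wrapped past INT_MAX */
		cookie = 1;	/* skip 0 and the error range */
	return cookie;
}
#endif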
2168
2169/**
2170 * ppc440spe_rxor_set_region - set RXOR region mask into the CDB operand
2171 */
2172static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
2173 u8 xor_arg_no, u32 mask)
2174{
2175 struct xor_cb *xcb = desc->hw_desc;
2176
2177 xcb->ops[xor_arg_no].h |= mask;
2178}
2179
2180/**
2181 * ppc440spe_rxor_set_src - set RXOR source address into the CDB operand
2182 */
2183static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
2184 u8 xor_arg_no, dma_addr_t addr)
2185{
2186 struct xor_cb *xcb = desc->hw_desc;
2187
2188 xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
2189 xcb->ops[xor_arg_no].l = addr;
2190}
2191
2192/**
2193 * ppc440spe_rxor_set_mult - set RXOR multiplier into the CDB operand
2194 */
2195static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
2196 u8 xor_arg_no, u8 idx, u8 mult)
2197{
2198 struct xor_cb *xcb = desc->hw_desc;
2199
2200 xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
2201}
2202
2203/**
2204 * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
2205 * has been achieved
2206 */
2207static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
2208{
2209 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
2210 chan->device->id, chan->pending);
2211
2212 if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
2213 chan->pending = 0;
2214 ppc440spe_chan_append(chan);
2215 }
2216}
2217
2218/**
2219 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
2220 * (the descriptors are not necessarily submitted to the h/w
2221 * chains right away)
2222 */
2223static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
2224{
2225 struct ppc440spe_adma_desc_slot *sw_desc;
2226 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
2227 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
2228 int slot_cnt;
2229 int slots_per_op;
2230 dma_cookie_t cookie;
2231
2232 sw_desc = tx_to_ppc440spe_adma_slot(tx);
2233
2234 group_start = sw_desc->group_head;
2235 slot_cnt = group_start->slot_cnt;
2236 slots_per_op = group_start->slots_per_op;
2237
2238 spin_lock_bh(&chan->lock);
2239
2240 cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
2241
2242 if (unlikely(list_empty(&chan->chain))) {
2243 /* first peer */
2244 list_splice_init(&sw_desc->group_list, &chan->chain);
2245 chan_first_cdb[chan->device->id] = group_start;
2246 } else {
2247 /* not the first peer, bind CDBs to the chain */
2248 old_chain_tail = list_entry(chan->chain.prev,
2249 struct ppc440spe_adma_desc_slot,
2250 chain_node);
2251 list_splice_init(&sw_desc->group_list,
2252 &old_chain_tail->chain_node);
2253 /* fix up the hardware chain */
2254 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
2255 }
2256
2257 /* increment the pending count by the number of operations */
2258 chan->pending += slot_cnt / slots_per_op;
2259 ppc440spe_adma_check_threshold(chan);
2260 spin_unlock_bh(&chan->lock);
2261
2262 dev_dbg(chan->device->common.dev,
2263 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
2264 chan->device->id, __func__,
2265 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
2266
2267 return cookie;
2268}
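
/*
 * Client-side sketch (hypothetical, for illustration only): tx_submit()
 * just queues the group and appends to the h/w chain once
 * PPC440SPE_ADMA_THRESHOLD operations are pending, so a client should call
 * dma_async_issue_pending() to make sure below-threshold descriptors get
 * started too.
 */
#if 0
static void example_submit(struct dma_chan *chan,
			   struct dma_async_tx_descriptor *tx,
			   dma_async_tx_callback done, void *done_param)
{
	dma_cookie_t cookie;

	tx->callback = done;		/* optional completion callback */
	tx->callback_param = done_param;
	cookie = tx->tx_submit(tx);	/* ppc440spe_adma_tx_submit() here */
	if (dma_submit_error(cookie))
		return;
	dma_async_issue_pending(chan);	/* kick the channel */
}
#endif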
2269
2270/**
2271 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
2272 */
2273static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
2274 struct dma_chan *chan, unsigned long flags)
2275{
2276 struct ppc440spe_adma_chan *ppc440spe_chan;
2277 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2278 int slot_cnt, slots_per_op;
2279
2280 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2281
2282 dev_dbg(ppc440spe_chan->device->common.dev,
2283 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
2284 __func__);
2285
2286 spin_lock_bh(&ppc440spe_chan->lock);
2287 slot_cnt = slots_per_op = 1;
2288 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2289 slots_per_op);
2290 if (sw_desc) {
2291 group_start = sw_desc->group_head;
2292 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
2293 group_start->unmap_len = 0;
2294 sw_desc->async_tx.flags = flags;
2295 }
2296 spin_unlock_bh(&ppc440spe_chan->lock);
2297
2298 return sw_desc ? &sw_desc->async_tx : NULL;
2299}
2300
2301/**
2302 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
2303 */
2304static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
2305 struct dma_chan *chan, dma_addr_t dma_dest,
2306 dma_addr_t dma_src, size_t len, unsigned long flags)
2307{
2308 struct ppc440spe_adma_chan *ppc440spe_chan;
2309 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2310 int slot_cnt, slots_per_op;
2311
2312 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2313
2314 if (unlikely(!len))
2315 return NULL;
2316
2317 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2318
2319 spin_lock_bh(&ppc440spe_chan->lock);
2320
2321 dev_dbg(ppc440spe_chan->device->common.dev,
2322 "ppc440spe adma%d: %s len: %u int_en %d\n",
2323 ppc440spe_chan->device->id, __func__, len,
2324 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2325 slot_cnt = slots_per_op = 1;
2326 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2327 slots_per_op);
2328 if (sw_desc) {
2329 group_start = sw_desc->group_head;
2330 ppc440spe_desc_init_memcpy(group_start, flags);
2331 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2332 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2333 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2334 sw_desc->unmap_len = len;
2335 sw_desc->async_tx.flags = flags;
2336 }
2337 spin_unlock_bh(&ppc440spe_chan->lock);
2338
2339 return sw_desc ? &sw_desc->async_tx : NULL;
2340}
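
/*
 * Usage sketch (hypothetical): clients normally reach this prep routine
 * through the async_tx layer rather than calling it directly.  Assuming
 * "dpage"/"spage" are kernel pages and PAGE_SIZE is within the engine
 * limit checked above:
 */
#if 0
static struct dma_async_tx_descriptor *example_copy_page(struct page *dpage,
							 struct page *spage)
{
	struct async_submit_ctl submit;

	/* no dependency, no callback, no scribble space needed for memcpy */
	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
	return async_memcpy(dpage, spage, 0, 0, PAGE_SIZE, &submit);
}
#endif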
2341
2342/**
2343 * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
2344 */
2345static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
2346 struct dma_chan *chan, dma_addr_t dma_dest, int value,
2347 size_t len, unsigned long flags)
2348{
2349 struct ppc440spe_adma_chan *ppc440spe_chan;
2350 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2351 int slot_cnt, slots_per_op;
2352
2353 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2354
2355 if (unlikely(!len))
2356 return NULL;
2357
2358 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2359
2360 spin_lock_bh(&ppc440spe_chan->lock);
2361
2362 dev_dbg(ppc440spe_chan->device->common.dev,
2363 "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
2364 ppc440spe_chan->device->id, __func__, value, len,
2365 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2366
2367 slot_cnt = slots_per_op = 1;
2368 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2369 slots_per_op);
2370 if (sw_desc) {
2371 group_start = sw_desc->group_head;
2372 ppc440spe_desc_init_memset(group_start, value, flags);
2373 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2374 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2375 sw_desc->unmap_len = len;
2376 sw_desc->async_tx.flags = flags;
2377 }
2378 spin_unlock_bh(&ppc440spe_chan->lock);
2379
2380 return sw_desc ? &sw_desc->async_tx : NULL;
2381}
2382
2383/**
2384 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2385 */
2386static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2387 struct dma_chan *chan, dma_addr_t dma_dest,
2388 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2389 unsigned long flags)
2390{
2391 struct ppc440spe_adma_chan *ppc440spe_chan;
2392 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2393 int slot_cnt, slots_per_op;
2394
2395 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2396
2397 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2398 dma_dest, dma_src, src_cnt));
2399 if (unlikely(!len))
2400 return NULL;
2401 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2402
2403 dev_dbg(ppc440spe_chan->device->common.dev,
2404 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2405 ppc440spe_chan->device->id, __func__, src_cnt, len,
2406 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2407
2408 spin_lock_bh(&ppc440spe_chan->lock);
2409 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2410 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2411 slots_per_op);
2412 if (sw_desc) {
2413 group_start = sw_desc->group_head;
2414 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2415 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2416 while (src_cnt--)
2417 ppc440spe_adma_memcpy_xor_set_src(group_start,
2418 dma_src[src_cnt], src_cnt);
2419 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2420 sw_desc->unmap_len = len;
2421 sw_desc->async_tx.flags = flags;
2422 }
2423 spin_unlock_bh(&ppc440spe_chan->lock);
2424
2425 return sw_desc ? &sw_desc->async_tx : NULL;
2426}
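
/*
 * Usage sketch (hypothetical): the async_tx XOR front-end maps onto this
 * prep routine.  "scribble" is assumed to be addr_conv_t space of at least
 * src_cnt entries, as the async_xor() API requires.
 */
#if 0
static struct dma_async_tx_descriptor *example_xor_blocks(struct page *dest,
		struct page **srcs, int src_cnt, size_t len,
		addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	/* ASYNC_TX_XOR_ZERO_DST: zero the destination before accumulating */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
			  scribble);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}
#endif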
2427
2428static inline void
2429ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2430 int src_cnt);
2431static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2432
2433/**
2434 * ppc440spe_adma_init_dma2rxor_slot - initialize RXOR CDB sources for a DMA2 slot
2435 */
2436static void ppc440spe_adma_init_dma2rxor_slot(
2437 struct ppc440spe_adma_desc_slot *desc,
2438 dma_addr_t *src, int src_cnt)
2439{
2440 int i;
2441
2442 /* initialize CDB */
2443 for (i = 0; i < src_cnt; i++) {
2444 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2445 desc->src_cnt, (u32)src[i]);
2446 }
2447}
2448
2449/**
2450 * ppc440spe_dma01_prep_mult - prepare CDBs for a multiplication
2451 * for the Q operation where the destination is also the source
2452 */
2453static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2454 struct ppc440spe_adma_chan *ppc440spe_chan,
2455 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2456 const unsigned char *scf, size_t len, unsigned long flags)
2457{
2458 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2459 unsigned long op = 0;
2460 int slot_cnt;
2461
2462 set_bit(PPC440SPE_DESC_WXOR, &op);
2463 slot_cnt = 2;
2464
2465 spin_lock_bh(&ppc440spe_chan->lock);
2466
2467 /* use WXOR, each descriptor occupies one slot */
2468 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2469 if (sw_desc) {
2470 struct ppc440spe_adma_chan *chan;
2471 struct ppc440spe_adma_desc_slot *iter;
2472 struct dma_cdb *hw_desc;
2473
2474 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2475 set_bits(op, &sw_desc->flags);
2476 sw_desc->src_cnt = src_cnt;
2477 sw_desc->dst_cnt = dst_cnt;
2478 /* First descriptor: zero data in the destination and copy it
2479 * to the q page using a MULTICAST transfer.
2480 */
2481 iter = list_first_entry(&sw_desc->group_list,
2482 struct ppc440spe_adma_desc_slot,
2483 chain_node);
2484 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2485 /* set 'next' pointer */
2486 iter->hw_next = list_entry(iter->chain_node.next,
2487 struct ppc440spe_adma_desc_slot,
2488 chain_node);
2489 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2490 hw_desc = iter->hw_desc;
2491 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2492
2493 ppc440spe_desc_set_dest_addr(iter, chan,
2494 DMA_CUED_XOR_BASE, dst[0], 0);
2495 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2496 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2497 src[0]);
2498 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2499 iter->unmap_len = len;
2500
2501 /*
2502 * Second descriptor, multiply data from the q page
2503 * and store the result in real destination.
2504 */
2505 iter = list_first_entry(&iter->chain_node,
2506 struct ppc440spe_adma_desc_slot,
2507 chain_node);
2508 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2509 iter->hw_next = NULL;
2510 if (flags & DMA_PREP_INTERRUPT)
2511 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2512 else
2513 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2514
2515 hw_desc = iter->hw_desc;
2516 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2517 ppc440spe_desc_set_src_addr(iter, chan, 0,
2518 DMA_CUED_XOR_HB, dst[1]);
2519 ppc440spe_desc_set_dest_addr(iter, chan,
2520 DMA_CUED_XOR_BASE, dst[0], 0);
2521
2522 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2523 DMA_CDB_SG_DST1, scf[0]);
2524 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2525 iter->unmap_len = len;
2526 sw_desc->async_tx.flags = flags;
2527 }
2528
2529 spin_unlock_bh(&ppc440spe_chan->lock);
2530
2531 return sw_desc;
2532}
2533
2534/**
2535 * ppc440spe_dma01_prep_sum_product - prepare CDBs for the
2536 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where the destination
2537 * is also the source.
2538 */
2539static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2540 struct ppc440spe_adma_chan *ppc440spe_chan,
2541 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2542 const unsigned char *scf, size_t len, unsigned long flags)
2543{
2544 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2545 unsigned long op = 0;
2546 int slot_cnt;
2547
2548 set_bit(PPC440SPE_DESC_WXOR, &op);
2549 slot_cnt = 3;
2550
2551 spin_lock_bh(&ppc440spe_chan->lock);
2552
2553 /* WXOR, each descriptor occupies one slot */
2554 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2555 if (sw_desc) {
2556 struct ppc440spe_adma_chan *chan;
2557 struct ppc440spe_adma_desc_slot *iter;
2558 struct dma_cdb *hw_desc;
2559
2560 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2561 set_bits(op, &sw_desc->flags);
2562 sw_desc->src_cnt = src_cnt;
2563 sw_desc->dst_cnt = 1;
2564 /* 1st descriptor, src[1] data to q page and zero destination */
2565 iter = list_first_entry(&sw_desc->group_list,
2566 struct ppc440spe_adma_desc_slot,
2567 chain_node);
2568 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2569 iter->hw_next = list_entry(iter->chain_node.next,
2570 struct ppc440spe_adma_desc_slot,
2571 chain_node);
2572 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2573 hw_desc = iter->hw_desc;
2574 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2575
2576 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2577 *dst, 0);
2578 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2579 ppc440spe_chan->qdest, 1);
2580 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2581 src[1]);
2582 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2583 iter->unmap_len = len;
2584
2585 /* 2nd descriptor, multiply src[1] data and store the
2586 * result in destination */
2587 iter = list_first_entry(&iter->chain_node,
2588 struct ppc440spe_adma_desc_slot,
2589 chain_node);
2590 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2591 /* set 'next' pointer */
2592 iter->hw_next = list_entry(iter->chain_node.next,
2593 struct ppc440spe_adma_desc_slot,
2594 chain_node);
2595 if (flags & DMA_PREP_INTERRUPT)
2596 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2597 else
2598 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2599
2600 hw_desc = iter->hw_desc;
2601 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2602 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2603 ppc440spe_chan->qdest);
2604 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2605 *dst, 0);
2606 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2607 DMA_CDB_SG_DST1, scf[1]);
2608 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2609 iter->unmap_len = len;
2610
2611 /*
2612 * 3rd descriptor, multiply src[0] data and xor it
2613 * with destination
2614 */
2615 iter = list_first_entry(&iter->chain_node,
2616 struct ppc440spe_adma_desc_slot,
2617 chain_node);
2618 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2619 iter->hw_next = NULL;
2620 if (flags & DMA_PREP_INTERRUPT)
2621 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2622 else
2623 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2624
2625 hw_desc = iter->hw_desc;
2626 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2627 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2628 src[0]);
2629 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2630 *dst, 0);
2631 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2632 DMA_CDB_SG_DST1, scf[0]);
2633 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2634 iter->unmap_len = len;
2635 sw_desc->async_tx.flags = flags;
2636 }
2637
2638 spin_unlock_bh(&ppc440spe_chan->lock);
2639
2640 return sw_desc;
2641}
2642
2643static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2644 struct ppc440spe_adma_chan *ppc440spe_chan,
2645 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2646 const unsigned char *scf, size_t len, unsigned long flags)
2647{
2648 int slot_cnt;
2649 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2650 unsigned long op = 0;
2651 unsigned char mult = 1;
2652
2653 pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2654 __func__, dst_cnt, src_cnt, len);
2655 /* select operations WXOR/RXOR depending on the
2656 * source addresses of operators and the number
2657 * of destinations (RXOR supports only Q-parity calculations)
2658 */
2659 set_bit(PPC440SPE_DESC_WXOR, &op);
2660 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2661 /* no active RXOR;
2662 * do RXOR if:
2663 * - there is more than one source,
2664 * - len is aligned on 512-byte boundary,
2665 * - source addresses fit to one of 4 possible regions.
2666 */
2667 if (src_cnt > 1 &&
2668 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2669 (src[0] + len) == src[1]) {
2670 /* may do RXOR R1 R2 */
2671 set_bit(PPC440SPE_DESC_RXOR, &op);
2672 if (src_cnt != 2) {
2673 /* may try to enhance region of RXOR */
2674 if ((src[1] + len) == src[2]) {
2675 /* do RXOR R1 R2 R3 */
2676 set_bit(PPC440SPE_DESC_RXOR123,
2677 &op);
2678 } else if ((src[1] + len * 2) == src[2]) {
2679 /* do RXOR R1 R2 R4 */
2680 set_bit(PPC440SPE_DESC_RXOR124, &op);
2681 } else if ((src[1] + len * 3) == src[2]) {
2682 /* do RXOR R1 R2 R5 */
2683 set_bit(PPC440SPE_DESC_RXOR125,
2684 &op);
2685 } else {
2686 /* do RXOR R1 R2 */
2687 set_bit(PPC440SPE_DESC_RXOR12,
2688 &op);
2689 }
2690 } else {
2691 /* do RXOR R1 R2 */
2692 set_bit(PPC440SPE_DESC_RXOR12, &op);
2693 }
2694 }
2695
2696 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2697 /* cannot do this operation with RXOR */
2698 clear_bit(PPC440SPE_RXOR_RUN,
2699 &ppc440spe_rxor_state);
2700 } else {
2701 /* can do; set block size right now */
2702 ppc440spe_desc_set_rxor_block_size(len);
2703 }
2704 }
2705
2706 /* Number of necessary slots depends on operation type selected */
2707 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2708 /* This is a WXOR only chain. Need descriptors for each
2709 * source to GF-XOR them with WXOR, and need descriptors
2710 * for each destination to zero them with WXOR
2711 */
2712 slot_cnt = src_cnt;
2713
2714 if (flags & DMA_PREP_ZERO_P) {
2715 slot_cnt++;
2716 set_bit(PPC440SPE_ZERO_P, &op);
2717 }
2718 if (flags & DMA_PREP_ZERO_Q) {
2719 slot_cnt++;
2720 set_bit(PPC440SPE_ZERO_Q, &op);
2721 }
2722 } else {
2723 /* Need 1 or 2 descriptors for the RXOR operation,
2724 * and (src_cnt - (2 or 3)) descriptors for WXOR of the
2725 * remaining sources (if any)
2726 */
2727 slot_cnt = dst_cnt;
2728
2729 if (flags & DMA_PREP_ZERO_P)
2730 set_bit(PPC440SPE_ZERO_P, &op);
2731 if (flags & DMA_PREP_ZERO_Q)
2732 set_bit(PPC440SPE_ZERO_Q, &op);
2733
2734 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2735 slot_cnt += src_cnt - 2;
2736 else
2737 slot_cnt += src_cnt - 3;
2738
2739 /* Thus we have either RXOR only chain or
2740 * mixed RXOR/WXOR
2741 */
2742 if (slot_cnt == dst_cnt)
2743 /* RXOR only chain */
2744 clear_bit(PPC440SPE_DESC_WXOR, &op);
2745 }
2746
2747 spin_lock_bh(&ppc440spe_chan->lock);
2748 /* for both RXOR/WXOR each descriptor occupies one slot */
2749 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2750 if (sw_desc) {
2751 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2752 flags, op);
2753
2754 /* setup dst/src/mult */
2755 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2756 __func__, dst[0], dst[1]);
2757 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2758 while (src_cnt--) {
2759 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2760 src_cnt);
2761
2762 /* NOTE: "Multi = 0 is equivalent to = 1" as
2763 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf
2764 * doesn't work for RXOR with DMA0/1! Instead, multi=0
2765 * leads to zeroing source data after RXOR.
2766 * So, for P case set-up mult=1 explicitly.
2767 */
2768 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2769 mult = scf[src_cnt];
2770 ppc440spe_adma_pq_set_src_mult(sw_desc,
2771 mult, src_cnt, dst_cnt - 1);
2772 }
2773
2774 /* Setup byte count for each slot just allocated */
2775 sw_desc->async_tx.flags = flags;
2776 list_for_each_entry(iter, &sw_desc->group_list,
2777 chain_node) {
2778 ppc440spe_desc_set_byte_count(iter,
2779 ppc440spe_chan, len);
2780 iter->unmap_len = len;
2781 }
2782 }
2783 spin_unlock_bh(&ppc440spe_chan->lock);
2784
2785 return sw_desc;
2786}
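
/*
 * Worked example of the RXOR region selection above (addresses assumed):
 * with len = 4096 (512-byte aligned) and sources at
 *
 *	src[0] = 0x10000000, src[1] = 0x10001000, src[2] = 0x10002000
 *
 * src[0] + len == src[1] and src[1] + len == src[2], so the chain is built
 * as RXOR R1 R2 R3 (PPC440SPE_DESC_RXOR123).  With src[2] at src[1] + 2*len
 * or src[1] + 3*len, RXOR124 or RXOR125 would be chosen instead; any other
 * layout falls back to RXOR12 for the first pair plus WXOR descriptors for
 * the remaining sources.
 */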
2787
2788static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2789 struct ppc440spe_adma_chan *ppc440spe_chan,
2790 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2791 const unsigned char *scf, size_t len, unsigned long flags)
2792{
2793 int slot_cnt, descs_per_op;
2794 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2795 unsigned long op = 0;
2796 unsigned char mult = 1;
2797
2798 BUG_ON(!dst_cnt);
2799 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2800 __func__, dst_cnt, src_cnt, len);*/
2801
2802 spin_lock_bh(&ppc440spe_chan->lock);
2803 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2804 if (descs_per_op < 0) {
2805 spin_unlock_bh(&ppc440spe_chan->lock);
2806 return NULL;
2807 }
2808
2809 /* depending on number of sources we have 1 or 2 RXOR chains */
2810 slot_cnt = descs_per_op * dst_cnt;
2811
2812 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2813 if (sw_desc) {
2814 op = slot_cnt;
2815 sw_desc->async_tx.flags = flags;
2816 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2817 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2818 --op ? 0 : flags);
2819 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2820 len);
2821 iter->unmap_len = len;
2822
2823 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2824 iter->rxor_cursor.len = len;
2825 iter->descs_per_op = descs_per_op;
2826 }
2827 op = 0;
2828 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2829 op++;
2830 if (op % descs_per_op == 0)
2831 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2832 src_cnt);
2833 if (likely(!list_is_last(&iter->chain_node,
2834 &sw_desc->group_list))) {
2835 /* set 'next' pointer */
2836 iter->hw_next =
2837 list_entry(iter->chain_node.next,
2838 struct ppc440spe_adma_desc_slot,
2839 chain_node);
2840 ppc440spe_xor_set_link(iter, iter->hw_next);
2841 } else {
2842 /* this is the last descriptor. */
2843 iter->hw_next = NULL;
2844 }
2845 }
2846
2847 /* fixup head descriptor */
2848 sw_desc->dst_cnt = dst_cnt;
2849 if (flags & DMA_PREP_ZERO_P)
2850 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2851 if (flags & DMA_PREP_ZERO_Q)
2852 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2853
2854 /* setup dst/src/mult */
2855 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2856
2857 while (src_cnt--) {
2858 /* handle descriptors (if dst_cnt == 2) inside
2859 * the ppc440spe_adma_pq_set_srcxxx() functions
2860 */
2861 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2862 src_cnt);
2863 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2864 mult = scf[src_cnt];
2865 ppc440spe_adma_pq_set_src_mult(sw_desc,
2866 mult, src_cnt, dst_cnt - 1);
2867 }
2868 }
2869 spin_unlock_bh(&ppc440spe_chan->lock);
2870 ppc440spe_desc_set_rxor_block_size(len);
2871 return sw_desc;
2872}
2873
2874/**
2875 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2876 */
2877static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2878 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2879 unsigned int src_cnt, const unsigned char *scf,
2880 size_t len, unsigned long flags)
2881{
2882 struct ppc440spe_adma_chan *ppc440spe_chan;
2883 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2884 int dst_cnt = 0;
2885
2886 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2887
2888 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2889 dst, src, src_cnt));
2890 BUG_ON(!len);
2891 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2892 BUG_ON(!src_cnt);
2893
2894 if (src_cnt == 1 && dst[1] == src[0]) {
2895 dma_addr_t dest[2];
2896
2897 /* dst[1] is real destination (Q) */
2898 dest[0] = dst[1];
2899 /* this is the page to multicast source data to */
2900 dest[1] = ppc440spe_chan->qdest;
2901 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2902 dest, 2, src, src_cnt, scf, len, flags);
2903 return sw_desc ? &sw_desc->async_tx : NULL;
2904 }
2905
2906 if (src_cnt == 2 && dst[1] == src[1]) {
2907 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2908 &dst[1], src, 2, scf, len, flags);
2909 return sw_desc ? &sw_desc->async_tx : NULL;
2910 }
2911
2912 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2913 BUG_ON(!dst[0]);
2914 dst_cnt++;
2915 flags |= DMA_PREP_ZERO_P;
2916 }
2917
2918 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2919 BUG_ON(!dst[1]);
2920 dst_cnt++;
2921 flags |= DMA_PREP_ZERO_Q;
2922 }
2923
2924 BUG_ON(!dst_cnt);
2925
2926 dev_dbg(ppc440spe_chan->device->common.dev,
2927 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2928 ppc440spe_chan->device->id, __func__, src_cnt, len,
2929 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2930
2931 switch (ppc440spe_chan->device->id) {
2932 case PPC440SPE_DMA0_ID:
2933 case PPC440SPE_DMA1_ID:
2934 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2935 dst, dst_cnt, src, src_cnt, scf,
2936 len, flags);
2937 break;
2938
2939 case PPC440SPE_XOR_ID:
2940 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2941 dst, dst_cnt, src, src_cnt, scf,
2942 len, flags);
2943 break;
2944 }
2945
2946 return sw_desc ? &sw_desc->async_tx : NULL;
2947}
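
/*
 * Usage sketch (hypothetical): the RAID-6 stack drives this prep routine
 * through async_gen_syndrome().  By convention blocks[0..disks-3] are data,
 * blocks[disks-2] is P and blocks[disks-1] is Q; "scribble" is assumed to
 * be addr_conv_t space sized for "disks" entries.
 */
#if 0
static struct dma_async_tx_descriptor *example_raid6_pq(struct page **blocks,
		int disks, size_t len, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	/* computes both P and Q with the standard gf multipliers */
	return async_gen_syndrome(blocks, 0, disks, len, &submit);
}
#endif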
2948
2949/**
2950 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2951 * a PQ_ZERO_SUM operation
2952 */
2953static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2954 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2955 unsigned int src_cnt, const unsigned char *scf, size_t len,
2956 enum sum_check_flags *pqres, unsigned long flags)
2957{
2958 struct ppc440spe_adma_chan *ppc440spe_chan;
2959 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2960 dma_addr_t pdest, qdest;
2961 int slot_cnt, slots_per_op, idst, dst_cnt;
2962
2963 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2964
2965 if (flags & DMA_PREP_PQ_DISABLE_P)
2966 pdest = 0;
2967 else
2968 pdest = pq[0];
2969
2970 if (flags & DMA_PREP_PQ_DISABLE_Q)
2971 qdest = 0;
2972 else
2973 qdest = pq[1];
2974
2975 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2976 src, src_cnt, scf));
2977
2978 /* Always use WXOR for P/Q calculations (two destinations).
2979 * Need 1 or 2 extra slots to verify results are zero.
2980 */
2981 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2982
2983 /* One additional slot per destination to clone P/Q
2984 * before calculation (we have to preserve destinations).
2985 */
2986 slot_cnt = src_cnt + dst_cnt * 2;
2987 slots_per_op = 1;
2988
2989 spin_lock_bh(&ppc440spe_chan->lock);
2990 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2991 slots_per_op);
2992 if (sw_desc) {
2993 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2994
2995 /* Setup byte count for each slot just allocated */
2996 sw_desc->async_tx.flags = flags;
2997 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2998 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2999 len);
3000 iter->unmap_len = len;
3001 }
3002
3003 if (pdest) {
3004 struct dma_cdb *hw_desc;
3005 struct ppc440spe_adma_chan *chan;
3006
3007 iter = sw_desc->group_head;
3008 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3009 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
3010 iter->hw_next = list_entry(iter->chain_node.next,
3011 struct ppc440spe_adma_desc_slot,
3012 chain_node);
3013 hw_desc = iter->hw_desc;
3014 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
3015 iter->src_cnt = 0;
3016 iter->dst_cnt = 0;
3017 ppc440spe_desc_set_dest_addr(iter, chan, 0,
3018 ppc440spe_chan->pdest, 0);
3019 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
3020 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
3021 len);
3022 iter->unmap_len = 0;
3023 /* override pdest to preserve original P */
3024 pdest = ppc440spe_chan->pdest;
3025 }
3026 if (qdest) {
3027 struct dma_cdb *hw_desc;
3028 struct ppc440spe_adma_chan *chan;
3029
3030 iter = list_first_entry(&sw_desc->group_list,
3031 struct ppc440spe_adma_desc_slot,
3032 chain_node);
3033 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3034
3035 if (pdest) {
3036 iter = list_entry(iter->chain_node.next,
3037 struct ppc440spe_adma_desc_slot,
3038 chain_node);
3039 }
3040
3041 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
3042 iter->hw_next = list_entry(iter->chain_node.next,
3043 struct ppc440spe_adma_desc_slot,
3044 chain_node);
3045 hw_desc = iter->hw_desc;
3046 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
3047 iter->src_cnt = 0;
3048 iter->dst_cnt = 0;
3049 ppc440spe_desc_set_dest_addr(iter, chan, 0,
3050 ppc440spe_chan->qdest, 0);
3051 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
3052 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
3053 len);
3054 iter->unmap_len = 0;
3055 /* override qdest to preserve original Q */
3056 qdest = ppc440spe_chan->qdest;
3057 }
3058
3059 /* Setup destinations for P/Q ops */
3060 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
3061
3062 /* Setup zero QWORDs into DCHECK CDBs */
3063 idst = dst_cnt;
3064 list_for_each_entry_reverse(iter, &sw_desc->group_list,
3065 chain_node) {
3066 /*
3067 * The last CDB corresponds to Q-parity check,
3068 * the one before the last CDB corresponds
3069 * to the P-parity check
3070 */
3071 if (idst == DMA_DEST_MAX_NUM) {
3072 if (idst == dst_cnt) {
3073 set_bit(PPC440SPE_DESC_QCHECK,
3074 &iter->flags);
3075 } else {
3076 set_bit(PPC440SPE_DESC_PCHECK,
3077 &iter->flags);
3078 }
3079 } else {
3080 if (qdest) {
3081 set_bit(PPC440SPE_DESC_QCHECK,
3082 &iter->flags);
3083 } else {
3084 set_bit(PPC440SPE_DESC_PCHECK,
3085 &iter->flags);
3086 }
3087 }
3088 iter->xor_check_result = pqres;
3089
3090 /*
3091 * set it to zero; if the check fails, the result
3092 * will be updated
3093 */
3094 *iter->xor_check_result = 0;
3095 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
3096 ppc440spe_qword);
3097
3098 if (!(--dst_cnt))
3099 break;
3100 }
3101
3102 /* Setup sources and mults for P/Q ops */
3103 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
3104 chain_node) {
3105 struct ppc440spe_adma_chan *chan;
3106 u32 mult_dst;
3107
3108 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3109 ppc440spe_desc_set_src_addr(iter, chan, 0,
3110 DMA_CUED_XOR_HB,
3111 src[src_cnt - 1]);
3112 if (qdest) {
3113 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
3114 DMA_CDB_SG_DST1;
3115 ppc440spe_desc_set_src_mult(iter, chan,
3116 DMA_CUED_MULT1_OFF,
3117 mult_dst,
3118 scf[src_cnt - 1]);
3119 }
3120 if (!(--src_cnt))
3121 break;
3122 }
3123 }
3124 spin_unlock_bh(&ppc440spe_chan->lock);
3125 return sw_desc ? &sw_desc->async_tx : NULL;
3126}
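
/*
 * Usage sketch (hypothetical): RAID-6 resync validates parity through
 * async_syndrome_val(), which maps onto the prep routine above.  "spare"
 * is a scratch page used when the operation falls back to software.
 */
#if 0
static struct dma_async_tx_descriptor *example_raid6_check(
		struct page **blocks, int disks, size_t len,
		struct page *spare, enum sum_check_flags *pqres,
		addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	/* *pqres gets SUM_CHECK_P_RESULT/SUM_CHECK_Q_RESULT bits on mismatch */
	return async_syndrome_val(blocks, 0, disks, len, pqres, spare,
				  &submit);
}
#endif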
3127
3128/**
3129 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
3130 * XOR ZERO_SUM operation
3131 */
3132static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
3133 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
3134 size_t len, enum sum_check_flags *result, unsigned long flags)
3135{
3136 struct dma_async_tx_descriptor *tx;
3137 dma_addr_t pq[2];
3138
3139 /* validate P, disable Q */
3140 pq[0] = src[0];
3141 pq[1] = 0;
3142 flags |= DMA_PREP_PQ_DISABLE_Q;
3143
3144 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
3145 src_cnt - 1, 0, len,
3146 result, flags);
3147 return tx;
3148}
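
/*
 * Usage sketch (hypothetical): a dmaengine client can reach this hook
 * through the generic device op.  Per the wrapper above, src[0] is the
 * parity to validate and src[1..src_cnt-1] are the data blocks; the
 * caller must submit, issue and wait for completion before reading
 * "result".
 */
#if 0
static int example_check_parity(struct dma_chan *chan, dma_addr_t *src,
				unsigned int src_cnt, size_t len)
{
	enum sum_check_flags result = 0;
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_xor_val(chan, src, src_cnt, len,
						   &result,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;
	/* ... submit tx and wait for completion here ... */
	return (result & SUM_CHECK_P_RESULT) ? -EILSEQ : 0;
}
#endif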
3149
3150/**
3151 * ppc440spe_adma_set_dest - set destination address into descriptor
3152 */
3153static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3154 dma_addr_t addr, int index)
3155{
3156 struct ppc440spe_adma_chan *chan;
3157
3158 BUG_ON(index >= sw_desc->dst_cnt);
3159
3160 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3161
3162 switch (chan->device->id) {
3163 case PPC440SPE_DMA0_ID:
3164 case PPC440SPE_DMA1_ID:
3165 /* TODO: support transfer lengths >
3166 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
3167 */
3168 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
3169 chan, 0, addr, index);
3170 break;
3171 case PPC440SPE_XOR_ID:
3172 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
3173 ppc440spe_desc_set_dest_addr(sw_desc,
3174 chan, 0, addr, index);
3175 break;
3176 }
3177}
3178
3179static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
3180 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
3181{
3182 /* To clear destinations update the descriptor
3183 * (P or Q depending on index) as follows:
3184 * addr is destination (0 corresponds to SG2):
3185 */
3186 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
3187
3188 /* ... and the addr is source: */
3189 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
3190
3191 /* addr is always SG2 then the mult is always DST1 */
3192 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
3193 DMA_CDB_SG_DST1, 1);
3194}
3195
3196/**
3197 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
3198 * for the PQXOR operation
3199 */
3200static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3201 dma_addr_t *addrs, unsigned long flags)
3202{
3203 struct ppc440spe_adma_desc_slot *iter;
3204 struct ppc440spe_adma_chan *chan;
3205 dma_addr_t paddr, qaddr;
3206 dma_addr_t addr = 0, ppath, qpath;
3207 int index = 0, i;
3208
3209 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3210
3211 if (flags & DMA_PREP_PQ_DISABLE_P)
3212 paddr = 0;
3213 else
3214 paddr = addrs[0];
3215
3216 if (flags & DMA_PREP_PQ_DISABLE_Q)
3217 qaddr = 0;
3218 else
3219 qaddr = addrs[1];
3220
3221 if (!paddr || !qaddr)
3222 addr = paddr ? paddr : qaddr;
3223
3224 switch (chan->device->id) {
3225 case PPC440SPE_DMA0_ID:
3226 case PPC440SPE_DMA1_ID:
3227 /* walk through the WXOR source list and set P/Q-destinations
3228 * for each slot:
3229 */
3230 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3231 /* This is WXOR-only chain; may have 1/2 zero descs */
3232 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3233 index++;
3234 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3235 index++;
3236
3237 iter = ppc440spe_get_group_entry(sw_desc, index);
3238 if (addr) {
3239 /* one destination */
3240 list_for_each_entry_from(iter,
3241 &sw_desc->group_list, chain_node)
3242 ppc440spe_desc_set_dest_addr(iter, chan,
3243 DMA_CUED_XOR_BASE, addr, 0);
3244 } else {
3245 /* two destinations */
3246 list_for_each_entry_from(iter,
3247 &sw_desc->group_list, chain_node) {
3248 ppc440spe_desc_set_dest_addr(iter, chan,
3249 DMA_CUED_XOR_BASE, paddr, 0);
3250 ppc440spe_desc_set_dest_addr(iter, chan,
3251 DMA_CUED_XOR_BASE, qaddr, 1);
3252 }
3253 }
3254
3255 if (index) {
3256 /* To clear destinations update the descriptor
3257 * (1st,2nd, or both depending on flags)
3258 */
3259 index = 0;
3260 if (test_bit(PPC440SPE_ZERO_P,
3261 &sw_desc->flags)) {
3262 iter = ppc440spe_get_group_entry(
3263 sw_desc, index++);
3264 ppc440spe_adma_pq_zero_op(iter, chan,
3265 paddr);
3266 }
3267
3268 if (test_bit(PPC440SPE_ZERO_Q,
3269 &sw_desc->flags)) {
3270 iter = ppc440spe_get_group_entry(
3271 sw_desc, index++);
3272 ppc440spe_adma_pq_zero_op(iter, chan,
3273 qaddr);
3274 }
3275
3276 return;
3277 }
3278 } else {
3279 /* This is RXOR-only or RXOR/WXOR mixed chain */
3280
3281 /* If we want to include destination into calculations,
3282 * then make dest addresses cued with mult=1 (XOR).
3283 */
3284 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3285 DMA_CUED_XOR_HB :
3286 DMA_CUED_XOR_BASE |
3287 (1 << DMA_CUED_MULT1_OFF);
3288 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3289 DMA_CUED_XOR_HB :
3290 DMA_CUED_XOR_BASE |
3291 (1 << DMA_CUED_MULT1_OFF);
3292
3293 /* Setup destination(s) in RXOR slot(s) */
3294 iter = ppc440spe_get_group_entry(sw_desc, index++);
3295 ppc440spe_desc_set_dest_addr(iter, chan,
3296 paddr ? ppath : qpath,
3297 paddr ? paddr : qaddr, 0);
3298 if (!addr) {
3299 /* two destinations */
3300 iter = ppc440spe_get_group_entry(sw_desc,
3301 index++);
3302 ppc440spe_desc_set_dest_addr(iter, chan,
3303 qpath, qaddr, 0);
3304 }
3305
3306 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
3307 /* Setup destination(s) in remaining WXOR
3308 * slots
3309 */
3310 iter = ppc440spe_get_group_entry(sw_desc,
3311 index);
3312 if (addr) {
3313 /* one destination */
3314 list_for_each_entry_from(iter,
3315 &sw_desc->group_list,
3316 chain_node)
3317 ppc440spe_desc_set_dest_addr(
3318 iter, chan,
3319 DMA_CUED_XOR_BASE,
3320 addr, 0);
3321
3322 } else {
3323 /* two destinations */
3324 list_for_each_entry_from(iter,
3325 &sw_desc->group_list,
3326 chain_node) {
3327 ppc440spe_desc_set_dest_addr(
3328 iter, chan,
3329 DMA_CUED_XOR_BASE,
3330 paddr, 0);
3331 ppc440spe_desc_set_dest_addr(
3332 iter, chan,
3333 DMA_CUED_XOR_BASE,
3334 qaddr, 1);
3335 }
3336 }
3337 }
3338
3339 }
3340 break;
3341
3342 case PPC440SPE_XOR_ID:
3343 /* DMA2 descriptors have only 1 destination, so there are
3344 * two chains - one for each dest.
3345 * If we want to include destination into calculations,
3346 * then make dest addresses cued with mult=1 (XOR).
3347 */
3348 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3349 DMA_CUED_XOR_HB :
3350 DMA_CUED_XOR_BASE |
3351 (1 << DMA_CUED_MULT1_OFF);
3352
3353 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3354 DMA_CUED_XOR_HB :
3355 DMA_CUED_XOR_BASE |
3356 (1 << DMA_CUED_MULT1_OFF);
3357
3358 iter = ppc440spe_get_group_entry(sw_desc, 0);
3359 for (i = 0; i < sw_desc->descs_per_op; i++) {
3360 ppc440spe_desc_set_dest_addr(iter, chan,
3361 paddr ? ppath : qpath,
3362 paddr ? paddr : qaddr, 0);
3363 iter = list_entry(iter->chain_node.next,
3364 struct ppc440spe_adma_desc_slot,
3365 chain_node);
3366 }
3367
3368 if (!addr) {
3369 /* Two destinations; setup Q here */
3370 iter = ppc440spe_get_group_entry(sw_desc,
3371 sw_desc->descs_per_op);
3372 for (i = 0; i < sw_desc->descs_per_op; i++) {
3373 ppc440spe_desc_set_dest_addr(iter,
3374 chan, qpath, qaddr, 0);
3375 iter = list_entry(iter->chain_node.next,
3376 struct ppc440spe_adma_desc_slot,
3377 chain_node);
3378 }
3379 }
3380
3381 break;
3382 }
3383}
3384
3385/**
3386 * ppc440spe_adma_pqzero_sum_set_dest - set destination addresses into
3387 * the descriptor for the PQ_ZERO_SUM operation
3388 */
3389static void ppc440spe_adma_pqzero_sum_set_dest(
3390 struct ppc440spe_adma_desc_slot *sw_desc,
3391 dma_addr_t paddr, dma_addr_t qaddr)
3392{
3393 struct ppc440spe_adma_desc_slot *iter, *end;
3394 struct ppc440spe_adma_chan *chan;
3395 dma_addr_t addr = 0;
3396 int idx;
3397
3398 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3399
3400 /* walk through the WXOR source list and set P/Q-destinations
3401 * for each slot
3402 */
3403 idx = (paddr && qaddr) ? 2 : 1;
3404 /* set end */
3405 list_for_each_entry_reverse(end, &sw_desc->group_list,
3406 chain_node) {
3407 if (!(--idx))
3408 break;
3409 }
3410 /* set start */
3411 idx = (paddr && qaddr) ? 2 : 1;
3412 iter = ppc440spe_get_group_entry(sw_desc, idx);
3413
3414 if (paddr && qaddr) {
3415 /* two destinations */
3416 list_for_each_entry_from(iter, &sw_desc->group_list,
3417 chain_node) {
3418 if (unlikely(iter == end))
3419 break;
3420 ppc440spe_desc_set_dest_addr(iter, chan,
3421 DMA_CUED_XOR_BASE, paddr, 0);
3422 ppc440spe_desc_set_dest_addr(iter, chan,
3423 DMA_CUED_XOR_BASE, qaddr, 1);
3424 }
3425 } else {
3426 /* one destination */
3427 addr = paddr ? paddr : qaddr;
3428 list_for_each_entry_from(iter, &sw_desc->group_list,
3429 chain_node) {
3430 if (unlikely(iter == end))
3431 break;
3432 ppc440spe_desc_set_dest_addr(iter, chan,
3433 DMA_CUED_XOR_BASE, addr, 0);
3434 }
3435 }
3436
3437 /* The remaining descriptors are DATACHECK. They need no
3438 * destination: their 'destinations' are actually used as
3439 * sources for the check operation. So, set addr as source.
3440 */
3441 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3442
3443 if (!addr) {
3444 end = list_entry(end->chain_node.next,
3445 struct ppc440spe_adma_desc_slot, chain_node);
3446 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3447 }
3448}
3449
3450/**
3451 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3452 */
3453static inline void ppc440spe_desc_set_xor_src_cnt(
3454 struct ppc440spe_adma_desc_slot *desc,
3455 int src_cnt)
3456{
3457 struct xor_cb *hw_desc = desc->hw_desc;
3458
3459 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3460 hw_desc->cbc |= src_cnt;
3461}
3462
3463/**
3464 * ppc440spe_adma_pq_set_src - set source address into descriptor
3465 */
3466static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3467 dma_addr_t addr, int index)
3468{
3469 struct ppc440spe_adma_chan *chan;
3470 dma_addr_t haddr = 0;
3471 struct ppc440spe_adma_desc_slot *iter = NULL;
3472
3473 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3474
3475 switch (chan->device->id) {
3476 case PPC440SPE_DMA0_ID:
3477 case PPC440SPE_DMA1_ID:
3478 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
3479 */
3480 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3481 /* RXOR-only or RXOR/WXOR operation */
3482 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3483 &sw_desc->flags) ? 2 : 3;
3484
3485 if (index == 0) {
3486 /* 1st slot (RXOR) */
3487 /* setup sources region (R1-2-3, R1-2-4,
3488 * or R1-2-5)
3489 */
3490 if (test_bit(PPC440SPE_DESC_RXOR12,
3491 &sw_desc->flags))
3492 haddr = DMA_RXOR12 <<
3493 DMA_CUED_REGION_OFF;
3494 else if (test_bit(PPC440SPE_DESC_RXOR123,
3495 &sw_desc->flags))
3496 haddr = DMA_RXOR123 <<
3497 DMA_CUED_REGION_OFF;
3498 else if (test_bit(PPC440SPE_DESC_RXOR124,
3499 &sw_desc->flags))
3500 haddr = DMA_RXOR124 <<
3501 DMA_CUED_REGION_OFF;
3502 else if (test_bit(PPC440SPE_DESC_RXOR125,
3503 &sw_desc->flags))
3504 haddr = DMA_RXOR125 <<
3505 DMA_CUED_REGION_OFF;
3506 else
3507 BUG();
3508 haddr |= DMA_CUED_XOR_BASE;
3509 iter = ppc440spe_get_group_entry(sw_desc, 0);
3510 } else if (index < iskip) {
3511 /* 1st slot (RXOR): the source address shall
3512 * be set only once instead of for each of the
3513 * first <iskip> indexes
3514 */
3515 iter = NULL;
3516 } else {
3517 /* 2nd/3rd and subsequent slots (WXOR);
3518 * skip first slot with RXOR
3519 */
3520 haddr = DMA_CUED_XOR_HB;
3521 iter = ppc440spe_get_group_entry(sw_desc,
3522 index - iskip + sw_desc->dst_cnt);
3523 }
3524 } else {
3525 int znum = 0;
3526
3527 /* WXOR-only operation; skip first slots with
3528 * zeroing destinations
3529 */
3530 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3531 znum++;
3532 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3533 znum++;
3534
3535 haddr = DMA_CUED_XOR_HB;
3536 iter = ppc440spe_get_group_entry(sw_desc,
3537 index + znum);
3538 }
3539
3540 if (likely(iter)) {
3541 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3542
3543 if (!index &&
3544 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3545 sw_desc->dst_cnt == 2) {
3546 /* if we have two destinations for RXOR, then
3547 * setup source in the second descr too
3548 */
3549 iter = ppc440spe_get_group_entry(sw_desc, 1);
3550 ppc440spe_desc_set_src_addr(iter, chan, 0,
3551 haddr, addr);
3552 }
3553 }
3554 break;
3555
3556 case PPC440SPE_XOR_ID:
3557 /* DMA2 may do Biskup */
3558 iter = sw_desc->group_head;
3559 if (iter->dst_cnt == 2) {
3560 /* both P & Q calculations required; set P src here */
3561 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3562
3563 /* this is for Q */
3564 iter = ppc440spe_get_group_entry(sw_desc,
3565 sw_desc->descs_per_op);
3566 }
3567 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3568 break;
3569 }
3570}
3571
3572/**
3573 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3574 */
3575static void ppc440spe_adma_memcpy_xor_set_src(
3576 struct ppc440spe_adma_desc_slot *sw_desc,
3577 dma_addr_t addr, int index)
3578{
3579 struct ppc440spe_adma_chan *chan;
3580
3581 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3582 sw_desc = sw_desc->group_head;
3583
3584 if (likely(sw_desc))
3585 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3586}
3587
3588/**
3589 * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR address cursor
3590 */
3591static void ppc440spe_adma_dma2rxor_inc_addr(
3592 struct ppc440spe_adma_desc_slot *desc,
3593 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3594{
3595 cursor->addr_count++;
3596 if (index == src_cnt - 1) {
3597 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3598 } else if (cursor->addr_count == XOR_MAX_OPS) {
3599 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3600 cursor->addr_count = 0;
3601 cursor->desc_count++;
3602 }
3603}
3604
3605/**
3606 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3607 */
3608static int ppc440spe_adma_dma2rxor_prep_src(
3609 struct ppc440spe_adma_desc_slot *hdesc,
3610 struct ppc440spe_rxor *cursor, int index,
3611 int src_cnt, u32 addr)
3612{
3613 int rval = 0;
3614 u32 sign;
3615 struct ppc440spe_adma_desc_slot *desc = hdesc;
3616 int i;
3617
3618 for (i = 0; i < cursor->desc_count; i++) {
3619 desc = list_entry(hdesc->chain_node.next,
3620 struct ppc440spe_adma_desc_slot,
3621 chain_node);
3622 }
3623
3624 switch (cursor->state) {
3625 case 0:
3626 if (addr == cursor->addrl + cursor->len) {
3627 /* direct RXOR */
3628 cursor->state = 1;
3629 cursor->xor_count++;
3630 if (index == src_cnt-1) {
3631 ppc440spe_rxor_set_region(desc,
3632 cursor->addr_count,
3633 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3634 ppc440spe_adma_dma2rxor_inc_addr(
3635 desc, cursor, index, src_cnt);
3636 }
3637 } else if (cursor->addrl == addr + cursor->len) {
3638 /* reverse RXOR */
3639 cursor->state = 1;
3640 cursor->xor_count++;
3641 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3642 if (index == src_cnt-1) {
3643 ppc440spe_rxor_set_region(desc,
3644 cursor->addr_count,
3645 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3646 ppc440spe_adma_dma2rxor_inc_addr(
3647 desc, cursor, index, src_cnt);
3648 }
3649 } else {
3650 printk(KERN_ERR "Cannot build "
3651 "DMA2 RXOR command block.\n");
3652 BUG();
3653 }
3654 break;
3655 case 1:
3656 sign = test_bit(cursor->addr_count,
3657 desc->reverse_flags)
3658 ? -1 : 1;
3659 if (index == src_cnt-2 || (sign == -1
3660 && addr != cursor->addrl - 2*cursor->len)) {
3661 cursor->state = 0;
3662 cursor->xor_count = 1;
3663 cursor->addrl = addr;
3664 ppc440spe_rxor_set_region(desc,
3665 cursor->addr_count,
3666 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3667 ppc440spe_adma_dma2rxor_inc_addr(
3668 desc, cursor, index, src_cnt);
3669 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3670 cursor->state = 2;
3671 cursor->xor_count = 0;
3672 ppc440spe_rxor_set_region(desc,
3673 cursor->addr_count,
3674 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3675 if (index == src_cnt-1) {
3676 ppc440spe_adma_dma2rxor_inc_addr(
3677 desc, cursor, index, src_cnt);
3678 }
3679 } else if (addr == cursor->addrl + 3*cursor->len) {
3680 cursor->state = 2;
3681 cursor->xor_count = 0;
3682 ppc440spe_rxor_set_region(desc,
3683 cursor->addr_count,
3684 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3685 if (index == src_cnt-1) {
3686 ppc440spe_adma_dma2rxor_inc_addr(
3687 desc, cursor, index, src_cnt);
3688 }
3689 } else if (addr == cursor->addrl + 4*cursor->len) {
3690 cursor->state = 2;
3691 cursor->xor_count = 0;
3692 ppc440spe_rxor_set_region(desc,
3693 cursor->addr_count,
3694 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3695 if (index == src_cnt-1) {
3696 ppc440spe_adma_dma2rxor_inc_addr(
3697 desc, cursor, index, src_cnt);
3698 }
3699 } else {
3700 cursor->state = 0;
3701 cursor->xor_count = 1;
3702 cursor->addrl = addr;
3703 ppc440spe_rxor_set_region(desc,
3704 cursor->addr_count,
3705 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3706 ppc440spe_adma_dma2rxor_inc_addr(
3707 desc, cursor, index, src_cnt);
3708 }
3709 break;
3710 case 2:
3711 cursor->state = 0;
3712 cursor->addrl = addr;
3713 cursor->xor_count++;
3714 if (index) {
3715 ppc440spe_adma_dma2rxor_inc_addr(
3716 desc, cursor, index, src_cnt);
3717 }
3718 break;
3719 }
3720
3721 return rval;
3722}
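
/*
 * Worked trace of the state machine above (addresses assumed): for three
 * sources at A, A + len and A + 3*len the cursor starts in state 2 and
 * latches addrl = A while dropping to state 0; the second address matches
 * "addr == cursor->addrl + cursor->len", so a direct RXOR pair is started
 * (state 1); the third address then hits the "addrl + 3*len" arm, encodes
 * the triple as an R1-2-4 region (DMA_RXOR124) and returns to state 2.
 */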
3723
3724/**
3725 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed
3726 * that ppc440spe_adma_dma2rxor_prep_src() has already been run before this call
3727 */
3728static void ppc440spe_adma_dma2rxor_set_src(
3729 struct ppc440spe_adma_desc_slot *desc,
3730 int index, dma_addr_t addr)
3731{
3732 struct xor_cb *xcb = desc->hw_desc;
3733 int k = 0, op = 0, lop = 0;
3734
3735 /* get the RXOR operand which corresponds to index addr */
3736 while (op <= index) {
3737 lop = op;
3738 if (k == XOR_MAX_OPS) {
3739 k = 0;
3740 desc = list_entry(desc->chain_node.next,
3741 struct ppc440spe_adma_desc_slot, chain_node);
3742 xcb = desc->hw_desc;
3743
3744 }
3745 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3746 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3747 op += 2;
3748 else
3749 op += 3;
3750 }
3751
3752 BUG_ON(k < 1);
3753
3754 if (test_bit(k-1, desc->reverse_flags)) {
3755 /* reverse operand order; put last op in RXOR group */
3756 if (index == op - 1)
3757 ppc440spe_rxor_set_src(desc, k - 1, addr);
3758 } else {
3759 /* direct operand order; put first op in RXOR group */
3760 if (index == lop)
3761 ppc440spe_rxor_set_src(desc, k - 1, addr);
3762 }
3763}
3764
3765/**
3766 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed
3767 * that ppc440spe_adma_dma2rxor_prep_src() has already been run before this call
3768 */
3769static void ppc440spe_adma_dma2rxor_set_mult(
3770 struct ppc440spe_adma_desc_slot *desc,
3771 int index, u8 mult)
3772{
3773 struct xor_cb *xcb = desc->hw_desc;
3774 int k = 0, op = 0, lop = 0;
3775
3776 /* get the RXOR operand which corresponds to index mult */
3777 while (op <= index) {
3778 lop = op;
3779 if (k == XOR_MAX_OPS) {
3780 k = 0;
3781 desc = list_entry(desc->chain_node.next,
3782 struct ppc440spe_adma_desc_slot,
3783 chain_node);
3784 xcb = desc->hw_desc;
3785
3786 }
3787 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3788 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3789 op += 2;
3790 else
3791 op += 3;
3792 }
3793
3794 BUG_ON(k < 1);
3795 if (test_bit(k-1, desc->reverse_flags)) {
3796 /* reverse order */
3797 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3798 } else {
3799 /* direct order */
3800 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3801 }
3802}
3803
3804/**
3805 * ppc440spe_init_rxor_cursor - reset the RXOR cursor state
3806 */
3807static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3808{
3809 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3810 cursor->state = 2;
3811}
3812
3813/**
3814 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3815 * descriptor for the PQXOR operation
3816 */
3817static void ppc440spe_adma_pq_set_src_mult(
3818 struct ppc440spe_adma_desc_slot *sw_desc,
3819 unsigned char mult, int index, int dst_pos)
3820{
3821 struct ppc440spe_adma_chan *chan;
3822 u32 mult_idx, mult_dst;
3823 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3824
3825 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3826
3827 switch (chan->device->id) {
3828 case PPC440SPE_DMA0_ID:
3829 case PPC440SPE_DMA1_ID:
3830 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3831 int region = test_bit(PPC440SPE_DESC_RXOR12,
3832 &sw_desc->flags) ? 2 : 3;
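			/* 'region' is the number of sources consumed by the
			 * RXOR group: 2 for RXOR12, 3 for the RXOR123 variants
			 */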
3833
3834 if (index < region) {
3835 /* RXOR multipliers */
3836 iter = ppc440spe_get_group_entry(sw_desc,
3837 sw_desc->dst_cnt - 1);
3838 if (sw_desc->dst_cnt == 2)
3839 iter1 = ppc440spe_get_group_entry(
3840 sw_desc, 0);
3841
3842 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3843 mult_dst = DMA_CDB_SG_SRC;
3844 } else {
3845 /* WXOR multiplier */
3846 iter = ppc440spe_get_group_entry(sw_desc,
3847 index - region +
3848 sw_desc->dst_cnt);
3849 mult_idx = DMA_CUED_MULT1_OFF;
3850 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3851 DMA_CDB_SG_DST1;
3852 }
3853 } else {
3854 int znum = 0;
3855
3856 /* WXOR-only;
3857 * skip the first slots with destinations (if ZERO_DST
3858 * zeroing takes place)
3859 */
3860 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3861 znum++;
3862 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3863 znum++;
3864
3865 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3866 mult_idx = DMA_CUED_MULT1_OFF;
3867 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3868 }
3869
3870 if (likely(iter)) {
3871 ppc440spe_desc_set_src_mult(iter, chan,
3872 mult_idx, mult_dst, mult);
3873
3874 if (unlikely(iter1)) {
3875 /* if we have two destinations for RXOR, then
3876 * we've just set Q mult. Set-up P now.
3877 */
3878 ppc440spe_desc_set_src_mult(iter1, chan,
3879 mult_idx, mult_dst, 1);
3880 }
3881
3882 }
3883 break;
3884
3885 case PPC440SPE_XOR_ID:
3886 iter = sw_desc->group_head;
3887 if (sw_desc->dst_cnt == 2) {
3888 /* both P & Q calculations required; set P mult here */
3889 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3890
3891 /* and then set Q mult */
3892 iter = ppc440spe_get_group_entry(sw_desc,
3893 sw_desc->descs_per_op);
3894 }
3895 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3896 break;
3897 }
3898}
3899
3900/**
3901 * ppc440spe_adma_free_chan_resources - free the resources allocated
3902 */
3903static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3904{
3905 struct ppc440spe_adma_chan *ppc440spe_chan;
3906 struct ppc440spe_adma_desc_slot *iter, *_iter;
3907 int in_use_descs = 0;
3908
3909 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3910 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3911
3912 spin_lock_bh(&ppc440spe_chan->lock);
3913 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3914 chain_node) {
3915 in_use_descs++;
3916 list_del(&iter->chain_node);
3917 }
3918 list_for_each_entry_safe_reverse(iter, _iter,
3919 &ppc440spe_chan->all_slots, slot_node) {
3920 list_del(&iter->slot_node);
3921 kfree(iter);
3922 ppc440spe_chan->slots_allocated--;
3923 }
3924 ppc440spe_chan->last_used = NULL;
3925
3926 dev_dbg(ppc440spe_chan->device->common.dev,
3927 "ppc440spe adma%d %s slots_allocated %d\n",
3928 ppc440spe_chan->device->id,
3929 __func__, ppc440spe_chan->slots_allocated);
3930 spin_unlock_bh(&ppc440spe_chan->lock);
3931
3932 /* one is ok since we left it there on purpose */
3933 if (in_use_descs > 1)
3934 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3935 in_use_descs - 1);
3936}
3937
3938/**
3939 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3940 * @chan: ADMA channel handle
3941 * @cookie: ADMA transaction identifier
3942 * @txstate: a holder for the current state of the channel
3943 */
3944static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3945 dma_cookie_t cookie, struct dma_tx_state *txstate)
3946{
3947 struct ppc440spe_adma_chan *ppc440spe_chan;
3948 dma_cookie_t last_used;
3949 dma_cookie_t last_complete;
3950 enum dma_status ret;
3951
3952 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3953 last_used = chan->cookie;
3954 last_complete = chan->completed_cookie;
3955
3956 dma_set_tx_state(txstate, last_complete, last_used, 0);
3957
3958 ret = dma_async_is_complete(cookie, last_complete, last_used);
3959 if (ret == DMA_SUCCESS)
3960 return ret;
3961
3962 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3963
3964 last_used = chan->cookie;
3965 last_complete = chan->completed_cookie;
3966
3967 dma_set_tx_state(txstate, last_complete, last_used, 0);
3968
3969 return dma_async_is_complete(cookie, last_complete, last_used);
3970}
3971
3972/**
3973 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3974 */
3975static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3976{
3977 struct ppc440spe_adma_chan *chan = data;
3978
3979 dev_dbg(chan->device->common.dev,
3980 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3981
3982 tasklet_schedule(&chan->irq_tasklet);
3983 ppc440spe_adma_device_clear_eot_status(chan);
3984
3985 return IRQ_HANDLED;
3986}
3987
3988/**
3989 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3990 * do the same things as an EOT handler
3991 */
3992static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3993{
3994 struct ppc440spe_adma_chan *chan = data;
3995
3996 dev_dbg(chan->device->common.dev,
3997 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3998
3999 tasklet_schedule(&chan->irq_tasklet);
4000 ppc440spe_adma_device_clear_eot_status(chan);
4001
4002 return IRQ_HANDLED;
4003}
4004
4005/**
4006 * ppc440spe_test_callback - called when the test operation has completed
4007 */
4008static void ppc440spe_test_callback(void *unused)
4009{
4010 complete(&ppc440spe_r6_test_comp);
4011}
4012
4013/**
4014 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
4015 */
4016static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
4017{
4018 struct ppc440spe_adma_chan *ppc440spe_chan;
4019
4020 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4021 dev_dbg(ppc440spe_chan->device->common.dev,
4022 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
4023 __func__, ppc440spe_chan->pending);
4024
4025 if (ppc440spe_chan->pending) {
4026 ppc440spe_chan->pending = 0;
4027 ppc440spe_chan_append(ppc440spe_chan);
4028 }
4029}
4030
4031/**
4032 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA
4033 * engines use FIFOs, as opposed to the descriptor chains used by the XOR
4034 * engine, so this is an XOR-specific operation)
4035 */
4036static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
4037{
4038 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
4039 dma_cookie_t cookie;
4040 int slot_cnt, slots_per_op;
4041
4042 dev_dbg(chan->device->common.dev,
4043 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
4044
4045 spin_lock_bh(&chan->lock);
4046 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
4047 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
4048 if (sw_desc) {
4049 group_start = sw_desc->group_head;
4050 list_splice_init(&sw_desc->group_list, &chan->chain);
4051 async_tx_ack(&sw_desc->async_tx);
4052 ppc440spe_desc_init_null_xor(group_start);
4053
4054 cookie = chan->common.cookie;
4055 cookie++;
4056 if (cookie <= 1)
4057 cookie = 2;
4058
4059 /* initialize the completed cookie to be less than
4060 * the most recently used cookie
4061 */
4062 chan->common.completed_cookie = cookie - 1;
4063 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4064
4065 /* channel should not be busy */
4066 BUG_ON(ppc440spe_chan_is_busy(chan));
4067
4068 /* set the descriptor address */
4069 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
4070
4071 /* run the descriptor */
4072 ppc440spe_chan_run(chan);
4073 } else
4074 printk(KERN_ERR "ppc440spe adma%d"
4075 " failed to allocate null descriptor\n",
4076 chan->device->id);
4077 spin_unlock_bh(&chan->lock);
4078}
4079
4080/**
4081 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled
4082 * successfully. For this we just perform one WXOR operation with the same
4083 * source and destination addresses and a GF-multiplier of 1; if RAID-6
4084 * capabilities are enabled then we'll get src/dst filled with zero.
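 * (The test page starts filled with 0xFF, and 0xFF XOR 0xFF == 0x00, so a
 * page that reads back as zeroes proves the engine really ran the WXOR.)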
4085 */
4086static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
4087{
4088 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
4089 struct page *pg;
4090 char *a;
4091 dma_addr_t dma_addr, addrs[2];
4092 unsigned long op = 0;
4093 int rval = 0;
4094
4095 set_bit(PPC440SPE_DESC_WXOR, &op);
4096
4097 pg = alloc_page(GFP_KERNEL);
4098 if (!pg)
4099 return -ENOMEM;
4100
4101 spin_lock_bh(&chan->lock);
4102 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
4103 if (sw_desc) {
4104 /* 1 src, 1 dst, int_ena, WXOR */
4105 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
4106 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
4107 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
4108 iter->unmap_len = PAGE_SIZE;
4109 }
4110 } else {
4111 rval = -EFAULT;
4112 spin_unlock_bh(&chan->lock);
4113 goto exit;
4114 }
4115 spin_unlock_bh(&chan->lock);
4116
4117 /* Fill the test page with ones */
4118 memset(page_address(pg), 0xFF, PAGE_SIZE);
4119 dma_addr = dma_map_page(chan->device->dev, pg, 0,
4120 PAGE_SIZE, DMA_BIDIRECTIONAL);
4121
4122 /* Setup addresses */
4123 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
4124 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
4125 addrs[0] = dma_addr;
4126 addrs[1] = 0;
4127 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
4128
4129 async_tx_ack(&sw_desc->async_tx);
4130 sw_desc->async_tx.callback = ppc440spe_test_callback;
4131 sw_desc->async_tx.callback_param = NULL;
4132
4133 init_completion(&ppc440spe_r6_test_comp);
4134
4135 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
4136 ppc440spe_adma_issue_pending(&chan->common);
4137
4138 wait_for_completion(&ppc440spe_r6_test_comp);
4139
4140 /* Now check if the test page is zeroed */
4141 a = page_address(pg);
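	/* first u32 is zero and every byte equals the byte four positions
	 * before it, so the whole page must be zero
	 */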
4142 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
4143 /* page is zero - RAID-6 enabled */
4144 rval = 0;
4145 } else {
4146 /* RAID-6 was not enabled */
4147 rval = -EINVAL;
4148 }
4149exit:
4150 __free_page(pg);
4151 return rval;
4152}
4153
4154static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
4155{
4156 switch (adev->id) {
4157 case PPC440SPE_DMA0_ID:
4158 case PPC440SPE_DMA1_ID:
4159 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
4160 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4161 dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
4162 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4163 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
4164 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
4165 break;
4166 case PPC440SPE_XOR_ID:
4167 dma_cap_set(DMA_XOR, adev->common.cap_mask);
4168 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4169 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4170
4171 break;
4172 }
4173
4174 /* Set base routines */
4175 adev->common.device_alloc_chan_resources =
4176 ppc440spe_adma_alloc_chan_resources;
4177 adev->common.device_free_chan_resources =
4178 ppc440spe_adma_free_chan_resources;
4179 adev->common.device_tx_status = ppc440spe_adma_tx_status;
4180 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
4181
4182 /* Set prep routines based on capability */
4183 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
4184 adev->common.device_prep_dma_memcpy =
4185 ppc440spe_adma_prep_dma_memcpy;
4186 }
4187 if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
4188 adev->common.device_prep_dma_memset =
4189 ppc440spe_adma_prep_dma_memset;
4190 }
4191 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
4192 adev->common.max_xor = XOR_MAX_OPS;
4193 adev->common.device_prep_dma_xor =
4194 ppc440spe_adma_prep_dma_xor;
4195 }
4196 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
4197 switch (adev->id) {
4198 case PPC440SPE_DMA0_ID:
4199 dma_set_maxpq(&adev->common,
4200 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4201 break;
4202 case PPC440SPE_DMA1_ID:
4203 dma_set_maxpq(&adev->common,
4204 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4205 break;
4206 case PPC440SPE_XOR_ID:
4207 adev->common.max_pq = XOR_MAX_OPS * 3;
4208 break;
4209 }
4210 adev->common.device_prep_dma_pq =
4211 ppc440spe_adma_prep_dma_pq;
4212 }
4213 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
4214 switch (adev->id) {
4215 case PPC440SPE_DMA0_ID:
4216 adev->common.max_pq = DMA0_FIFO_SIZE /
4217 sizeof(struct dma_cdb);
4218 break;
4219 case PPC440SPE_DMA1_ID:
4220 adev->common.max_pq = DMA1_FIFO_SIZE /
4221 sizeof(struct dma_cdb);
4222 break;
4223 }
4224 adev->common.device_prep_dma_pq_val =
4225 ppc440spe_adma_prep_dma_pqzero_sum;
4226 }
4227 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
4228 switch (adev->id) {
4229 case PPC440SPE_DMA0_ID:
4230 adev->common.max_xor = DMA0_FIFO_SIZE /
4231 sizeof(struct dma_cdb);
4232 break;
4233 case PPC440SPE_DMA1_ID:
4234 adev->common.max_xor = DMA1_FIFO_SIZE /
4235 sizeof(struct dma_cdb);
4236 break;
4237 }
4238 adev->common.device_prep_dma_xor_val =
4239 ppc440spe_adma_prep_dma_xor_zero_sum;
4240 }
4241 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
4242 adev->common.device_prep_dma_interrupt =
4243 ppc440spe_adma_prep_dma_interrupt;
4244 }
4245 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
4246 "( %s%s%s%s%s%s%s)\n",
4247 dev_name(adev->dev),
4248 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
4249 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
4250 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
4251 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
4252 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
4253 dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
4254 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
4255}
4256
4257static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
4258 struct ppc440spe_adma_chan *chan,
4259 int *initcode)
4260{
4261 struct platform_device *ofdev;
4262 struct device_node *np;
4263 int ret;
4264
4265 ofdev = container_of(adev->dev, struct platform_device, dev);
4266 np = ofdev->dev.of_node;
4267 if (adev->id != PPC440SPE_XOR_ID) {
4268 adev->err_irq = irq_of_parse_and_map(np, 1);
4269 if (adev->err_irq == NO_IRQ) {
4270 dev_warn(adev->dev, "no err irq resource?\n");
4271 *initcode = PPC_ADMA_INIT_IRQ2;
4272 adev->err_irq = -ENXIO;
4273 } else
4274 atomic_inc(&ppc440spe_adma_err_irq_ref);
4275 } else {
4276 adev->err_irq = -ENXIO;
4277 }
4278
4279 adev->irq = irq_of_parse_and_map(np, 0);
4280 if (adev->irq == NO_IRQ) {
4281 dev_err(adev->dev, "no irq resource\n");
4282 *initcode = PPC_ADMA_INIT_IRQ1;
4283 ret = -ENXIO;
4284 goto err_irq_map;
4285 }
4286 dev_dbg(adev->dev, "irq %d, err irq %d\n",
4287 adev->irq, adev->err_irq);
4288
4289 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
4290 0, dev_driver_string(adev->dev), chan);
4291 if (ret) {
4292 dev_err(adev->dev, "can't request irq %d\n",
4293 adev->irq);
4294 *initcode = PPC_ADMA_INIT_IRQ1;
4295 ret = -EIO;
4296 goto err_req1;
4297 }
4298
4299 /* only DMA engines have a separate error IRQ
4300 * so it's Ok if err_irq < 0 in XOR engine case.
4301 */
4302 if (adev->err_irq > 0) {
4303 /* both DMA engines share common error IRQ */
4304 ret = request_irq(adev->err_irq,
4305 ppc440spe_adma_err_handler,
4306 IRQF_SHARED,
4307 dev_driver_string(adev->dev),
4308 chan);
4309 if (ret) {
4310 dev_err(adev->dev, "can't request irq %d\n",
4311 adev->err_irq);
4312 *initcode = PPC_ADMA_INIT_IRQ2;
4313 ret = -EIO;
4314 goto err_req2;
4315 }
4316 }
4317
4318 if (adev->id == PPC440SPE_XOR_ID) {
4319 /* enable XOR engine interrupts */
4320 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4321 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
4322 &adev->xor_reg->ier);
4323 } else {
4324 u32 mask, enable;
4325
4326 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4327 if (!np) {
4328 pr_err("%s: can't find I2O device tree node\n",
4329 __func__);
4330 ret = -ENODEV;
4331 goto err_req2;
4332 }
4333 adev->i2o_reg = of_iomap(np, 0);
4334 if (!adev->i2o_reg) {
4335 pr_err("%s: failed to map I2O registers\n", __func__);
4336 of_node_put(np);
4337 ret = -EINVAL;
4338 goto err_req2;
4339 }
4340 of_node_put(np);
4341 /* Unmask 'CS FIFO Attention' interrupts and
4342 * enable generating interrupts on errors
4343 */
4344 enable = (adev->id == PPC440SPE_DMA0_ID) ?
4345 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4346 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4347 mask = ioread32(&adev->i2o_reg->iopim) & enable;
4348 iowrite32(mask, &adev->i2o_reg->iopim);
4349 }
4350 return 0;
4351
4352err_req2:
4353 free_irq(adev->irq, chan);
4354err_req1:
4355 irq_dispose_mapping(adev->irq);
4356err_irq_map:
4357 if (adev->err_irq > 0) {
4358 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
4359 irq_dispose_mapping(adev->err_irq);
4360 }
4361 return ret;
4362}
4363
4364static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
4365 struct ppc440spe_adma_chan *chan)
4366{
4367 u32 mask, disable;
4368
4369 if (adev->id == PPC440SPE_XOR_ID) {
4370 /* disable XOR engine interrupts */
4371 mask = ioread32be(&adev->xor_reg->ier);
4372 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4373 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
4374 iowrite32be(mask, &adev->xor_reg->ier);
4375 } else {
4376 /* disable DMAx engine interrupts */
4377 disable = (adev->id == PPC440SPE_DMA0_ID) ?
4378 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4379 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4380 mask = ioread32(&adev->i2o_reg->iopim) | disable;
4381 iowrite32(mask, &adev->i2o_reg->iopim);
4382 }
4383 free_irq(adev->irq, chan);
4384 irq_dispose_mapping(adev->irq);
4385 if (adev->err_irq > 0) {
4386 free_irq(adev->err_irq, chan);
4387 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
4388 irq_dispose_mapping(adev->err_irq);
4389 iounmap(adev->i2o_reg);
4390 }
4391 }
4392}
4393
4394/**
4395 * ppc440spe_adma_probe - probe the asynch device
4396 */
4397static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
4398{
4399 struct device_node *np = ofdev->dev.of_node;
4400 struct resource res;
4401 struct ppc440spe_adma_device *adev;
4402 struct ppc440spe_adma_chan *chan;
4403 struct ppc_dma_chan_ref *ref, *_ref;
4404 int ret = 0, initcode = PPC_ADMA_INIT_OK;
4405 const u32 *idx;
4406 int len;
4407 void *regs;
4408 u32 id, pool_size;
4409
4410 if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
4411 id = PPC440SPE_XOR_ID;
4412 /* The XOR engine does not use FIFOs but a linked list,
4413 * so the pool size to allocate does not depend on the
4414 * engine configuration.
4415 */
4416 pool_size = PAGE_SIZE << 1;
4417 } else {
4418 /* it is DMA0 or DMA1 */
4419 idx = of_get_property(np, "cell-index", &len);
4420 if (!idx || (len != sizeof(u32))) {
4421 dev_err(&ofdev->dev, "Device node %s has missing "
4422 "or invalid cell-index property\n",
4423 np->full_name);
4424 return -EINVAL;
4425 }
4426 id = *idx;
4427 /* DMA0/1 engines use a FIFO to maintain CDBs, so we
4428 * should size the pool according to this FIFO's depth:
4429 * the pool must provide as many CDBs as the FIFO can
4430 * hold CDB pointers.
4431 *
4432 * That is
4433 * CDB size = 32B;
4434 * CDBs number = (DMA0_FIFO_SIZE >> 3);
4435 * Pool size = CDBs number * CDB size =
4436 * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
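 * E.g., assuming an illustrative DMA0_FIFO_SIZE of 0x1000: the FIFO holds
 * 0x1000 >> 3 = 512 CDB pointers, so the pool is 512 * 32B = 16KB,
 * i.e. exactly DMA0_FIFO_SIZE << 2.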
4437 */
4438 pool_size = (id == PPC440SPE_DMA0_ID) ?
4439 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4440 pool_size <<= 2;
4441 }
4442
4443 if (of_address_to_resource(np, 0, &res)) {
4444 dev_err(&ofdev->dev, "failed to get memory resource\n");
4445 initcode = PPC_ADMA_INIT_MEMRES;
4446 ret = -ENODEV;
4447 goto out;
4448 }
4449
4450 if (!request_mem_region(res.start, resource_size(&res),
4451 dev_driver_string(&ofdev->dev))) {
4452 dev_err(&ofdev->dev, "failed to request memory region %pR\n",
4453 &res);
4454 initcode = PPC_ADMA_INIT_MEMREG;
4455 ret = -EBUSY;
4456 goto out;
4457 }
4458
4459 /* create a device */
4460 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
4461 if (!adev) {
4462 dev_err(&ofdev->dev, "failed to allocate device\n");
4463 initcode = PPC_ADMA_INIT_ALLOC;
4464 ret = -ENOMEM;
4465 goto err_adev_alloc;
4466 }
4467
4468 adev->id = id;
4469 adev->pool_size = pool_size;
4470 /* allocate coherent memory for hardware descriptors */
4471 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
4472 adev->pool_size, &adev->dma_desc_pool,
4473 GFP_KERNEL);
4474 if (adev->dma_desc_pool_virt == NULL) {
4475 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
4476 "memory for hardware descriptors\n",
4477 adev->pool_size);
4478 initcode = PPC_ADMA_INIT_COHERENT;
4479 ret = -ENOMEM;
4480 goto err_dma_alloc;
4481 }
4482 dev_dbg(&ofdev->dev, "allocted descriptor pool virt 0x%p phys 0x%llx\n",
4483 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
4484
4485 regs = ioremap(res.start, resource_size(&res));
4486 if (!regs) {
4487 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
4488 goto err_regs_alloc;
4489 }
4490
4491 if (adev->id == PPC440SPE_XOR_ID) {
4492 adev->xor_reg = regs;
4493 /* Reset XOR */
4494 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
4495 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
4496 } else {
4497 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
4498 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4499 adev->dma_reg = regs;
4500 /* DMAx_FIFO_SIZE is defined in bytes,
4501 * <fsiz> is defined in number of CDB pointers (8 bytes each).
4502 * DMA FIFO Length = CSlength + CPlength, where
4503 * CSlength = CPlength = (fsiz + 1) * 8.
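 * E.g., for an illustrative 4KB FIFO: fsiz = (4096 >> 3) - 2 = 510, so
 * CSlength = CPlength = (510 + 1) * 8 = 4088 bytes.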
4504 */
4505 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
4506 &adev->dma_reg->fsiz);
4507 /* Configure DMA engine */
4508 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
4509 &adev->dma_reg->cfg);
4510 /* Clear Status */
4511 iowrite32(~0, &adev->dma_reg->dsts);
4512 }
4513
4514 adev->dev = &ofdev->dev;
4515 adev->common.dev = &ofdev->dev;
4516 INIT_LIST_HEAD(&adev->common.channels);
4517 dev_set_drvdata(&ofdev->dev, adev);
4518
4519 /* create a channel */
4520 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
4521 if (!chan) {
4522 dev_err(&ofdev->dev, "can't allocate channel structure\n");
4523 initcode = PPC_ADMA_INIT_CHANNEL;
4524 ret = -ENOMEM;
4525 goto err_chan_alloc;
4526 }
4527
4528 spin_lock_init(&chan->lock);
4529 INIT_LIST_HEAD(&chan->chain);
4530 INIT_LIST_HEAD(&chan->all_slots);
4531 chan->device = adev;
4532 chan->common.device = &adev->common;
4533 list_add_tail(&chan->common.device_node, &adev->common.channels);
4534 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4535 (unsigned long)chan);
4536
4537 /* allocate and map helper pages for async validation or
4538 * async_mult/async_sum_product operations on DMA0/1.
4539 */
4540 if (adev->id != PPC440SPE_XOR_ID) {
4541 chan->pdest_page = alloc_page(GFP_KERNEL);
4542 chan->qdest_page = alloc_page(GFP_KERNEL);
4543 if (!chan->pdest_page ||
4544 !chan->qdest_page) {
4545 if (chan->pdest_page)
4546 __free_page(chan->pdest_page);
4547 if (chan->qdest_page)
4548 __free_page(chan->qdest_page);
4549 ret = -ENOMEM;
4550 goto err_page_alloc;
4551 }
4552 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
4553 PAGE_SIZE, DMA_BIDIRECTIONAL);
4554 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
4555 PAGE_SIZE, DMA_BIDIRECTIONAL);
4556 }
4557
4558 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
4559 if (ref) {
4560 ref->chan = &chan->common;
4561 INIT_LIST_HEAD(&ref->node);
4562 list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
4563 } else {
4564 dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
4565 ret = -ENOMEM;
4566 goto err_ref_alloc;
4567 }
4568
4569 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
4570 if (ret)
4571 goto err_irq;
4572
4573 ppc440spe_adma_init_capabilities(adev);
4574
4575 ret = dma_async_device_register(&adev->common);
4576 if (ret) {
4577 initcode = PPC_ADMA_INIT_REGISTER;
4578 dev_err(&ofdev->dev, "failed to register dma device\n");
4579 goto err_dev_reg;
4580 }
4581
4582 goto out;
4583
4584err_dev_reg:
4585 ppc440spe_adma_release_irqs(adev, chan);
4586err_irq:
4587 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
4588 if (chan == to_ppc440spe_adma_chan(ref->chan)) {
4589 list_del(&ref->node);
4590 kfree(ref);
4591 }
4592 }
4593err_ref_alloc:
4594 if (adev->id != PPC440SPE_XOR_ID) {
4595 dma_unmap_page(&ofdev->dev, chan->pdest,
4596 PAGE_SIZE, DMA_BIDIRECTIONAL);
4597 dma_unmap_page(&ofdev->dev, chan->qdest,
4598 PAGE_SIZE, DMA_BIDIRECTIONAL);
4599 __free_page(chan->pdest_page);
4600 __free_page(chan->qdest_page);
4601 }
4602err_page_alloc:
4603 kfree(chan);
4604err_chan_alloc:
4605 if (adev->id == PPC440SPE_XOR_ID)
4606 iounmap(adev->xor_reg);
4607 else
4608 iounmap(adev->dma_reg);
4609err_regs_alloc:
4610 dma_free_coherent(adev->dev, adev->pool_size,
4611 adev->dma_desc_pool_virt,
4612 adev->dma_desc_pool);
4613err_dma_alloc:
4614 kfree(adev);
4615err_adev_alloc:
4616 release_mem_region(res.start, resource_size(&res));
4617out:
4618 if (id < PPC440SPE_ADMA_ENGINES_NUM)
4619 ppc440spe_adma_devices[id] = initcode;
4620
4621 return ret;
4622}
4623
4624/**
4625 * ppc440spe_adma_remove - remove the asynch device
4626 */
4627 static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev)
4628{
4629 struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
4630 struct device_node *np = ofdev->dev.of_node;
4631 struct resource res;
4632 struct dma_chan *chan, *_chan;
4633 struct ppc_dma_chan_ref *ref, *_ref;
4634 struct ppc440spe_adma_chan *ppc440spe_chan;
4635
4636 dev_set_drvdata(&ofdev->dev, NULL);
4637 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
4638 ppc440spe_adma_devices[adev->id] = -1;
4639
4640 dma_async_device_unregister(&adev->common);
4641
4642 list_for_each_entry_safe(chan, _chan, &adev->common.channels,
4643 device_node) {
4644 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4645 ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
4646 tasklet_kill(&ppc440spe_chan->irq_tasklet);
4647 if (adev->id != PPC440SPE_XOR_ID) {
4648 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
4649 PAGE_SIZE, DMA_BIDIRECTIONAL);
4650 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
4651 PAGE_SIZE, DMA_BIDIRECTIONAL);
4652 __free_page(ppc440spe_chan->pdest_page);
4653 __free_page(ppc440spe_chan->qdest_page);
4654 }
4655 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
4656 node) {
4657 if (ppc440spe_chan ==
4658 to_ppc440spe_adma_chan(ref->chan)) {
4659 list_del(&ref->node);
4660 kfree(ref);
4661 }
4662 }
4663 list_del(&chan->device_node);
4664 kfree(ppc440spe_chan);
4665 }
4666
4667 dma_free_coherent(adev->dev, adev->pool_size,
4668 adev->dma_desc_pool_virt, adev->dma_desc_pool);
4669 if (adev->id == PPC440SPE_XOR_ID)
4670 iounmap(adev->xor_reg);
4671 else
4672 iounmap(adev->dma_reg);
4673 of_address_to_resource(np, 0, &res);
4674 release_mem_region(res.start, resource_size(&res));
4675 kfree(adev);
4676 return 0;
4677}
4678
4679/*
4680 * /sys driver interface to enable h/w RAID-6 capabilities
4681 * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
4682 * directory are "devices", "enable" and "poly".
4683 * "devices" shows available engines.
4684 * "enable" is used to enable RAID-6 capabilities or to check
4685 * whether these has been activated.
4686 * "poly" allows setting/checking used polynomial (for PPC440SPe only).
4687 */
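
/*
 * Illustrative usage from userspace (the exact sysfs path depends on the
 * platform bus layout; the driver name is "PPC440SP(E)-ADMA"):
 *   cat .../driver/devices           - list engine init status
 *   echo <key> > .../driver/enable   - write the activation key, then
 *                                      re-read to check the result
 *   echo 0x11d > .../driver/poly     - select another 0x1xx polynomial
 */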
4688
4689static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
4690{
4691 ssize_t size = 0;
4692 int i;
4693
4694 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
4695 if (ppc440spe_adma_devices[i] == -1)
4696 continue;
4697 size += snprintf(buf + size, PAGE_SIZE - size,
4698 "PPC440SP(E)-ADMA.%d: %s\n", i,
4699 ppc_adma_errors[ppc440spe_adma_devices[i]]);
4700 }
4701 return size;
4702}
4703
4704static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
4705{
4706 return snprintf(buf, PAGE_SIZE,
4707 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
4708 ppc440spe_r6_enabled ? "EN" : "DIS");
4709}
4710
4711static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
4712 const char *buf, size_t count)
4713{
4714 unsigned long val;
4715
4716 if (!count || count > 11)
4717 return -EINVAL;
4718
4719 if (!ppc440spe_r6_tchan)
4720 return -EFAULT;
4721
4722 /* Write a key */
4723 sscanf(buf, "%lx", &val);
4724 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
4725 isync();
4726
4727 /* Verify whether it really works now */
4728 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
4729 pr_info("PPC440SP(e) RAID-6 has been activated "
4730 "successfully\n");
4731 ppc440spe_r6_enabled = 1;
4732 } else {
4733 pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
4734 " Error key ?\n");
4735 ppc440spe_r6_enabled = 0;
4736 }
4737 return count;
4738}
4739
4740static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
4741{
4742 ssize_t size = 0;
4743 u32 reg;
4744
4745#ifdef CONFIG_440SP
4746 /* 440SP has fixed polynomial */
4747 reg = 0x4d;
4748#else
4749 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4750 reg >>= MQ0_CFBHL_POLY;
4751 reg &= 0xFF;
4752#endif
4753
4754 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
4755 "uses 0x1%02x polynomial.\n", reg);
4756 return size;
4757}
4758
4759static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
4760 const char *buf, size_t count)
4761{
4762 unsigned long reg, val;
4763
4764#ifdef CONFIG_440SP
4765 /* 440SP uses default 0x14D polynomial only */
4766 return -EINVAL;
4767#endif
4768
4769 if (!count || count > 6)
4770 return -EINVAL;
4771
4772 /* e.g., 0x14D or 0x11D */
4773 sscanf(buf, "%lx", &val);
4774
4775 if (val & ~0x1FF)
4776 return -EINVAL;
4777
4778 val &= 0xFF;
4779 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4780 reg &= ~(0xFF << MQ0_CFBHL_POLY);
4781 reg |= val << MQ0_CFBHL_POLY;
4782 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
4783
4784 return count;
4785}
4786
4787static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
4788static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
4789 store_ppc440spe_r6enable);
4790static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
4791 store_ppc440spe_r6poly);
4792
4793/*
4794 * Common initialisation for RAID engines; allocate memory for
4795 * DMAx FIFOs, perform configuration common for all DMA engines.
4796 * Further DMA engine specific configuration is done at probe time.
4797 */
4798static int ppc440spe_configure_raid_devices(void)
4799{
4800 struct device_node *np;
4801 struct resource i2o_res;
4802 struct i2o_regs __iomem *i2o_reg;
4803 dcr_host_t i2o_dcr_host;
4804 unsigned int dcr_base, dcr_len;
4805 int i, ret;
4806
4807 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4808 if (!np) {
4809 pr_err("%s: can't find I2O device tree node\n",
4810 __func__);
4811 return -ENODEV;
4812 }
4813
4814 if (of_address_to_resource(np, 0, &i2o_res)) {
4815 of_node_put(np);
4816 return -EINVAL;
4817 }
4818
4819 i2o_reg = of_iomap(np, 0);
4820 if (!i2o_reg) {
4821 pr_err("%s: failed to map I2O registers\n", __func__);
4822 of_node_put(np);
4823 return -EINVAL;
4824 }
4825
4826 /* Get I2O DCRs base */
4827 dcr_base = dcr_resource_start(np, 0);
4828 dcr_len = dcr_resource_len(np, 0);
4829 if (!dcr_base && !dcr_len) {
4830 pr_err("%s: can't get DCR registers base/len!\n",
4831 np->full_name);
4832 of_node_put(np);
4833 iounmap(i2o_reg);
4834 return -ENODEV;
4835 }
4836
4837 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
4838 if (!DCR_MAP_OK(i2o_dcr_host)) {
4839 pr_err("%s: failed to map DCRs!\n", np->full_name);
4840 of_node_put(np);
4841 iounmap(i2o_reg);
4842 return -ENODEV;
4843 }
4844 of_node_put(np);
4845
4846 /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
4847 * the base address of FIFO memory space.
4848 * Actually we need twice as much physical memory as programmed in the
4849 * <fsiz> register (because there are two FIFOs for each DMA: CP and CS)
4850 */
4851 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
4852 GFP_KERNEL);
4853 if (!ppc440spe_dma_fifo_buf) {
4854 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
4855 iounmap(i2o_reg);
4856 dcr_unmap(i2o_dcr_host, dcr_len);
4857 return -ENOMEM;
4858 }
4859
4860 /*
4861 * Configure h/w
4862 */
4863 /* Reset I2O/DMA */
4864 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
4865 mtdcri(SDR0, DCRN_SDR0_SRST, 0);
4866
4867 /* Setup the base address of memory-mapped registers */
4868 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
4869 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
4870 I2O_REG_ENABLE);
4871 dcr_unmap(i2o_dcr_host, dcr_len);
4872
4873 /* Setup FIFO memory space base address */
4874 iowrite32(0, &i2o_reg->ifbah);
4875 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
4876
4877 /* set zero FIFO size for I2O, so the whole
4878 * ppc440spe_dma_fifo_buf is used by DMAs.
4879 * DMAx FIFOs will be configured during probe.
4880 */
4881 iowrite32(0, &i2o_reg->ifsiz);
4882 iounmap(i2o_reg);
4883
4884 /* To prepare WXOR/RXOR functionality we need access to
4885 * Memory Queue Module DCRs (it will eventually be enabled
4886 * via the /sys interface of the ppc440spe ADMA driver).
4887 */
4888 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
4889 if (!np) {
4890 pr_err("%s: can't find MQ device tree node\n",
4891 __func__);
4892 ret = -ENODEV;
4893 goto out_free;
4894 }
4895
4896 /* Get MQ DCRs base */
4897 dcr_base = dcr_resource_start(np, 0);
4898 dcr_len = dcr_resource_len(np, 0);
4899 if (!dcr_base && !dcr_len) {
4900 pr_err("%s: can't get DCR registers base/len!\n",
4901 np->full_name);
4902 ret = -ENODEV;
4903 goto out_mq;
4904 }
4905
4906 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
4907 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
4908 pr_err("%s: failed to map DCRs!\n", np->full_name);
4909 ret = -ENODEV;
4910 goto out_mq;
4911 }
4912 of_node_put(np);
4913 ppc440spe_mq_dcr_len = dcr_len;
4914
4915 /* Set HB alias */
4916 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
4917
4918 /* Set:
4919 * - LL transaction passing limit to 1;
4920 * - Memory controller cycle limit to 1;
4921 * - Galois Polynomial to 0x14d (default)
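 *   (0x14d = x^8 + x^6 + x^3 + x^2 + 1; only its low byte, 0x4d, is
 *   actually programmed into the POLY field)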
4922 */
4923 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
4924 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
4925 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
4926
4927 atomic_set(&ppc440spe_adma_err_irq_ref, 0);
4928 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
4929 ppc440spe_adma_devices[i] = -1;
4930
4931 return 0;
4932
4933out_mq:
4934 of_node_put(np);
4935out_free:
4936 kfree(ppc440spe_dma_fifo_buf);
4937 return ret;
4938}
4939
4940static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
4941 { .compatible = "ibm,dma-440spe", },
4942 { .compatible = "amcc,xor-accelerator", },
4943 {},
4944};
4945MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4946
4947static struct platform_driver ppc440spe_adma_driver = {
4948 .probe = ppc440spe_adma_probe,
4949 .remove = __devexit_p(ppc440spe_adma_remove),
4950 .driver = {
4951 .name = "PPC440SP(E)-ADMA",
4952 .owner = THIS_MODULE,
4953 .of_match_table = ppc440spe_adma_of_match,
4954 },
4955};
4956
4957static __init int ppc440spe_adma_init(void)
4958{
4959 int ret;
4960
4961 ret = ppc440spe_configure_raid_devices();
4962 if (ret)
4963 return ret;
4964
4965 ret = platform_driver_register(&ppc440spe_adma_driver);
4966 if (ret) {
4967 pr_err("%s: failed to register platform driver\n",
4968 __func__);
4969 goto out_reg;
4970 }
4971
4972 /* Initialization status */
4973 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4974 &driver_attr_devices);
4975 if (ret)
4976 goto out_dev;
4977
4978 /* RAID-6 h/w enable entry */
4979 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4980 &driver_attr_enable);
4981 if (ret)
4982 goto out_en;
4983
4984 /* GF polynomial to use */
4985 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4986 &driver_attr_poly);
4987 if (!ret)
4988 return ret;
4989
4990 driver_remove_file(&ppc440spe_adma_driver.driver,
4991 &driver_attr_enable);
4992out_en:
4993 driver_remove_file(&ppc440spe_adma_driver.driver,
4994 &driver_attr_devices);
4995out_dev:
4996 /* User will not be able to enable h/w RAID-6 */
4997 pr_err("%s: failed to create RAID-6 driver interface\n",
4998 __func__);
4999 platform_driver_unregister(&ppc440spe_adma_driver);
5000out_reg:
5001 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
5002 kfree(ppc440spe_dma_fifo_buf);
5003 return ret;
5004}
5005
5006static void __exit ppc440spe_adma_exit(void)
5007{
5008 driver_remove_file(&ppc440spe_adma_driver.driver,
5009 &driver_attr_poly);
5010 driver_remove_file(&ppc440spe_adma_driver.driver,
5011 &driver_attr_enable);
5012 driver_remove_file(&ppc440spe_adma_driver.driver,
5013 &driver_attr_devices);
5014 platform_driver_unregister(&ppc440spe_adma_driver);
5015 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
5016 kfree(ppc440spe_dma_fifo_buf);
5017}
5018
5019arch_initcall(ppc440spe_adma_init);
5020module_exit(ppc440spe_adma_exit);
5021
5022MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
5023MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
5024MODULE_LICENSE("GPL");