/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
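
/*
 * Editor's note (illustrative, follows directly from the macros above):
 * the hardware encodes source/destination counts with a bias, so a
 * 3-source XOR is programmed as src_cnt_to_hw(3) == 1 and decoded again
 * with src_cnt_to_sw(1) == 3; the 16-source PQ descriptors use a bias of
 * 9, hence the separate src16_cnt_to_* helpers.
 */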
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };

/*
 * technically sources 1 and 2 do not require SED, but the op will have
 * at least 9 descriptors so that's irrelevant.
 */
static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
				      1, 1, 1, 1, 1, 1, 1 };
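
/*
 * Editor's sketch (illustrative, not additional driver logic): the
 * *_idx_to_desc values above select between the base and extended
 * descriptor per source index. For XOR, xor_idx_to_desc == 0xe0
 * (binary 11100000), so for a given source index:
 *
 *	int ext  = xor_idx_to_desc >> idx & 1;	// 0 for idx 0-4, 1 for idx 5-7
 *	int slot = xor_idx_to_field[idx];	// raw field within that descriptor
 *
 * i.e. sources 0-4 fit in the base descriptor and sources 5-7 spill into
 * the extended one; pq_idx_to_desc == 0xf8 moves the split down to source
 * 3 because the PQ base descriptor has fewer free address fields.
 */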
static void ioat3_eh(struct ioat2_dma_chan *ioat);

static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
static int sed_get_pq16_pool_idx(int src_cnt)
{
	return pq16_idx_to_sed[src_cnt];
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}

static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(device->sed_pool, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(device->sed_pool, sed);
		return NULL;
	}

	return sed;
}
static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(device->sed_pool, sed);
}
299 static void ioat3_dma_unmap(struct ioat2_dma_chan
*ioat
,
300 struct ioat_ring_ent
*desc
, int idx
)
302 struct ioat_chan_common
*chan
= &ioat
->base
;
303 struct pci_dev
*pdev
= chan
->device
->pdev
;
304 size_t len
= desc
->len
;
305 size_t offset
= len
- desc
->hw
->size
;
306 struct dma_async_tx_descriptor
*tx
= &desc
->txd
;
307 enum dma_ctrl_flags flags
= tx
->flags
;
309 switch (desc
->hw
->ctl_f
.op
) {
311 if (!desc
->hw
->ctl_f
.null
) /* skip 'interrupt' ops */
312 ioat_dma_unmap(chan
, flags
, len
, desc
->hw
);
314 case IOAT_OP_XOR_VAL
:
316 struct ioat_xor_descriptor
*xor = desc
->xor;
317 struct ioat_ring_ent
*ext
;
318 struct ioat_xor_ext_descriptor
*xor_ex
= NULL
;
319 int src_cnt
= src_cnt_to_sw(xor->ctl_f
.src_cnt
);
320 struct ioat_raw_descriptor
*descs
[2];
324 ext
= ioat2_get_ring_ent(ioat
, idx
+ 1);
325 xor_ex
= ext
->xor_ex
;
328 if (!(flags
& DMA_COMPL_SKIP_SRC_UNMAP
)) {
329 descs
[0] = (struct ioat_raw_descriptor
*) xor;
330 descs
[1] = (struct ioat_raw_descriptor
*) xor_ex
;
331 for (i
= 0; i
< src_cnt
; i
++) {
332 dma_addr_t src
= xor_get_src(descs
, i
);
334 ioat_unmap(pdev
, src
- offset
, len
,
335 PCI_DMA_TODEVICE
, flags
, 0);
338 /* dest is a source in xor validate operations */
339 if (xor->ctl_f
.op
== IOAT_OP_XOR_VAL
) {
340 ioat_unmap(pdev
, xor->dst_addr
- offset
, len
,
341 PCI_DMA_TODEVICE
, flags
, 1);
346 if (!(flags
& DMA_COMPL_SKIP_DEST_UNMAP
))
347 ioat_unmap(pdev
, xor->dst_addr
- offset
, len
,
348 PCI_DMA_FROMDEVICE
, flags
, 1);
353 struct ioat_pq_descriptor
*pq
= desc
->pq
;
354 struct ioat_ring_ent
*ext
;
355 struct ioat_pq_ext_descriptor
*pq_ex
= NULL
;
356 int src_cnt
= src_cnt_to_sw(pq
->ctl_f
.src_cnt
);
357 struct ioat_raw_descriptor
*descs
[2];
361 ext
= ioat2_get_ring_ent(ioat
, idx
+ 1);
365 /* in the 'continue' case don't unmap the dests as sources */
366 if (dmaf_p_disabled_continue(flags
))
368 else if (dmaf_continue(flags
))
371 if (!(flags
& DMA_COMPL_SKIP_SRC_UNMAP
)) {
372 descs
[0] = (struct ioat_raw_descriptor
*) pq
;
373 descs
[1] = (struct ioat_raw_descriptor
*) pq_ex
;
374 for (i
= 0; i
< src_cnt
; i
++) {
375 dma_addr_t src
= pq_get_src(descs
, i
);
377 ioat_unmap(pdev
, src
- offset
, len
,
378 PCI_DMA_TODEVICE
, flags
, 0);
381 /* the dests are sources in pq validate operations */
382 if (pq
->ctl_f
.op
== IOAT_OP_XOR_VAL
) {
383 if (!(flags
& DMA_PREP_PQ_DISABLE_P
))
384 ioat_unmap(pdev
, pq
->p_addr
- offset
,
385 len
, PCI_DMA_TODEVICE
, flags
, 0);
386 if (!(flags
& DMA_PREP_PQ_DISABLE_Q
))
387 ioat_unmap(pdev
, pq
->q_addr
- offset
,
388 len
, PCI_DMA_TODEVICE
, flags
, 0);
393 if (!(flags
& DMA_COMPL_SKIP_DEST_UNMAP
)) {
394 if (!(flags
& DMA_PREP_PQ_DISABLE_P
))
395 ioat_unmap(pdev
, pq
->p_addr
- offset
, len
,
396 PCI_DMA_BIDIRECTIONAL
, flags
, 1);
397 if (!(flags
& DMA_PREP_PQ_DISABLE_Q
))
398 ioat_unmap(pdev
, pq
->q_addr
- offset
, len
,
399 PCI_DMA_BIDIRECTIONAL
, flags
, 1);
404 case IOAT_OP_PQ_VAL_16S
: {
405 struct ioat_pq_descriptor
*pq
= desc
->pq
;
406 int src_cnt
= src16_cnt_to_sw(pq
->ctl_f
.src_cnt
);
407 struct ioat_raw_descriptor
*descs
[4];
410 /* in the 'continue' case don't unmap the dests as sources */
411 if (dmaf_p_disabled_continue(flags
))
413 else if (dmaf_continue(flags
))
416 if (!(flags
& DMA_COMPL_SKIP_SRC_UNMAP
)) {
417 descs
[0] = (struct ioat_raw_descriptor
*)pq
;
418 descs
[1] = (struct ioat_raw_descriptor
*)(desc
->sed
->hw
);
419 descs
[2] = (struct ioat_raw_descriptor
*)(&desc
->sed
->hw
->b
[0]);
420 for (i
= 0; i
< src_cnt
; i
++) {
421 dma_addr_t src
= pq16_get_src(descs
, i
);
423 ioat_unmap(pdev
, src
- offset
, len
,
424 PCI_DMA_TODEVICE
, flags
, 0);
427 /* the dests are sources in pq validate operations */
428 if (pq
->ctl_f
.op
== IOAT_OP_XOR_VAL
) {
429 if (!(flags
& DMA_PREP_PQ_DISABLE_P
))
430 ioat_unmap(pdev
, pq
->p_addr
- offset
,
431 len
, PCI_DMA_TODEVICE
,
433 if (!(flags
& DMA_PREP_PQ_DISABLE_Q
))
434 ioat_unmap(pdev
, pq
->q_addr
- offset
,
435 len
, PCI_DMA_TODEVICE
,
441 if (!(flags
& DMA_COMPL_SKIP_DEST_UNMAP
)) {
442 if (!(flags
& DMA_PREP_PQ_DISABLE_P
))
443 ioat_unmap(pdev
, pq
->p_addr
- offset
, len
,
444 PCI_DMA_BIDIRECTIONAL
, flags
, 1);
445 if (!(flags
& DMA_PREP_PQ_DISABLE_Q
))
446 ioat_unmap(pdev
, pq
->q_addr
- offset
, len
,
447 PCI_DMA_BIDIRECTIONAL
, flags
, 1);
452 dev_err(&pdev
->dev
, "%s: unknown op type: %#x\n",
453 __func__
, desc
->hw
->ctl_f
.op
);
457 static bool desc_has_ext(struct ioat_ring_ent
*desc
)
459 struct ioat_dma_descriptor
*hw
= desc
->hw
;
461 if (hw
->ctl_f
.op
== IOAT_OP_XOR
||
462 hw
->ctl_f
.op
== IOAT_OP_XOR_VAL
) {
463 struct ioat_xor_descriptor
*xor = desc
->xor;
465 if (src_cnt_to_sw(xor->ctl_f
.src_cnt
) > 5)
467 } else if (hw
->ctl_f
.op
== IOAT_OP_PQ
||
468 hw
->ctl_f
.op
== IOAT_OP_PQ_VAL
) {
469 struct ioat_pq_descriptor
*pq
= desc
->pq
;
471 if (src_cnt_to_sw(pq
->ctl_f
.src_cnt
) > 3)
478 static u64
ioat3_get_current_completion(struct ioat_chan_common
*chan
)
483 completion
= *chan
->completion
;
484 phys_complete
= ioat_chansts_to_addr(completion
);
486 dev_dbg(to_dev(chan
), "%s: phys_complete: %#llx\n", __func__
,
487 (unsigned long long) phys_complete
);
489 return phys_complete
;
492 static bool ioat3_cleanup_preamble(struct ioat_chan_common
*chan
,
495 *phys_complete
= ioat3_get_current_completion(chan
);
496 if (*phys_complete
== chan
->last_completion
)
499 clear_bit(IOAT_COMPLETION_ACK
, &chan
->state
);
500 mod_timer(&chan
->timer
, jiffies
+ COMPLETION_TIMEOUT
);
506 desc_get_errstat(struct ioat2_dma_chan
*ioat
, struct ioat_ring_ent
*desc
)
508 struct ioat_dma_descriptor
*hw
= desc
->hw
;
510 switch (hw
->ctl_f
.op
) {
512 case IOAT_OP_PQ_VAL_16S
:
514 struct ioat_pq_descriptor
*pq
= desc
->pq
;
516 /* check if there's error written */
517 if (!pq
->dwbes_f
.wbes
)
520 /* need to set a chanerr var for checking to clear later */
522 if (pq
->dwbes_f
.p_val_err
)
523 *desc
->result
|= SUM_CHECK_P_RESULT
;
525 if (pq
->dwbes_f
.q_val_err
)
526 *desc
->result
|= SUM_CHECK_Q_RESULT
;
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
542 static void __cleanup(struct ioat2_dma_chan
*ioat
, dma_addr_t phys_complete
)
544 struct ioat_chan_common
*chan
= &ioat
->base
;
545 struct ioatdma_device
*device
= chan
->device
;
546 struct ioat_ring_ent
*desc
;
547 bool seen_current
= false;
548 int idx
= ioat
->tail
, i
;
551 dev_dbg(to_dev(chan
), "%s: head: %#x tail: %#x issued: %#x\n",
552 __func__
, ioat
->head
, ioat
->tail
, ioat
->issued
);
	/* At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
564 active
= ioat2_ring_active(ioat
);
565 for (i
= 0; i
< active
&& !seen_current
; i
++) {
566 struct dma_async_tx_descriptor
*tx
;
568 smp_read_barrier_depends();
569 prefetch(ioat2_get_ring_ent(ioat
, idx
+ i
+ 1));
570 desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
571 dump_desc_dbg(ioat
, desc
);
573 /* set err stat if we are using dwbes */
574 if (device
->cap
& IOAT_CAP_DWBES
)
575 desc_get_errstat(ioat
, desc
);
579 dma_cookie_complete(tx
);
580 ioat3_dma_unmap(ioat
, desc
, idx
+ i
);
582 tx
->callback(tx
->callback_param
);
587 if (tx
->phys
== phys_complete
)
590 /* skip extended descriptors */
591 if (desc_has_ext(desc
)) {
592 BUG_ON(i
+ 1 >= active
);
596 /* cleanup super extended descriptors */
598 ioat3_free_sed(device
, desc
->sed
);
602 smp_mb(); /* finish all descriptor reads before incrementing tail */
603 ioat
->tail
= idx
+ i
;
604 BUG_ON(active
&& !seen_current
); /* no active descs have written a completion? */
605 chan
->last_completion
= phys_complete
;
607 if (active
- i
== 0) {
608 dev_dbg(to_dev(chan
), "%s: cancel completion timeout\n",
610 clear_bit(IOAT_COMPLETION_PENDING
, &chan
->state
);
611 mod_timer(&chan
->timer
, jiffies
+ IDLE_TIMEOUT
);
613 /* 5 microsecond delay per pending descriptor */
614 writew(min((5 * (active
- i
)), IOAT_INTRDELAY_MASK
),
615 chan
->device
->reg_base
+ IOAT_INTRDELAY_OFFSET
);
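	/* Editor's note: e.g. with 10 descriptors still pending after this
	 * cleanup pass the interrupt coalescing delay is programmed to
	 * min(5 * 10, IOAT_INTRDELAY_MASK) microseconds, i.e. the delay
	 * scales with the backlog but is clamped to the register field.
	 */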
618 static void ioat3_cleanup(struct ioat2_dma_chan
*ioat
)
620 struct ioat_chan_common
*chan
= &ioat
->base
;
623 spin_lock_bh(&chan
->cleanup_lock
);
625 if (ioat3_cleanup_preamble(chan
, &phys_complete
))
626 __cleanup(ioat
, phys_complete
);
628 if (is_ioat_halted(*chan
->completion
)) {
629 u32 chanerr
= readl(chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
631 if (chanerr
& IOAT_CHANERR_HANDLE_MASK
) {
632 mod_timer(&chan
->timer
, jiffies
+ IDLE_TIMEOUT
);
637 spin_unlock_bh(&chan
->cleanup_lock
);
640 static void ioat3_cleanup_event(unsigned long data
)
642 struct ioat2_dma_chan
*ioat
= to_ioat2_chan((void *) data
);
645 writew(IOAT_CHANCTRL_RUN
, ioat
->base
.reg_base
+ IOAT_CHANCTRL_OFFSET
);
648 static void ioat3_restart_channel(struct ioat2_dma_chan
*ioat
)
650 struct ioat_chan_common
*chan
= &ioat
->base
;
653 ioat2_quiesce(chan
, 0);
654 if (ioat3_cleanup_preamble(chan
, &phys_complete
))
655 __cleanup(ioat
, phys_complete
);
657 __ioat2_restart_chan(ioat
);
660 static void ioat3_eh(struct ioat2_dma_chan
*ioat
)
662 struct ioat_chan_common
*chan
= &ioat
->base
;
663 struct pci_dev
*pdev
= to_pdev(chan
);
664 struct ioat_dma_descriptor
*hw
;
666 struct ioat_ring_ent
*desc
;
671 /* cleanup so tail points to descriptor that caused the error */
672 if (ioat3_cleanup_preamble(chan
, &phys_complete
))
673 __cleanup(ioat
, phys_complete
);
675 chanerr
= readl(chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
676 pci_read_config_dword(pdev
, IOAT_PCI_CHANERR_INT_OFFSET
, &chanerr_int
);
678 dev_dbg(to_dev(chan
), "%s: error = %x:%x\n",
679 __func__
, chanerr
, chanerr_int
);
681 desc
= ioat2_get_ring_ent(ioat
, ioat
->tail
);
683 dump_desc_dbg(ioat
, desc
);
685 switch (hw
->ctl_f
.op
) {
686 case IOAT_OP_XOR_VAL
:
687 if (chanerr
& IOAT_CHANERR_XOR_P_OR_CRC_ERR
) {
688 *desc
->result
|= SUM_CHECK_P_RESULT
;
689 err_handled
|= IOAT_CHANERR_XOR_P_OR_CRC_ERR
;
693 case IOAT_OP_PQ_VAL_16S
:
694 if (chanerr
& IOAT_CHANERR_XOR_P_OR_CRC_ERR
) {
695 *desc
->result
|= SUM_CHECK_P_RESULT
;
696 err_handled
|= IOAT_CHANERR_XOR_P_OR_CRC_ERR
;
698 if (chanerr
& IOAT_CHANERR_XOR_Q_ERR
) {
699 *desc
->result
|= SUM_CHECK_Q_RESULT
;
700 err_handled
|= IOAT_CHANERR_XOR_Q_ERR
;
705 /* fault on unhandled error or spurious halt */
706 if (chanerr
^ err_handled
|| chanerr
== 0) {
707 dev_err(to_dev(chan
), "%s: fatal error (%x:%x)\n",
708 __func__
, chanerr
, err_handled
);
712 writel(chanerr
, chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
713 pci_write_config_dword(pdev
, IOAT_PCI_CHANERR_INT_OFFSET
, chanerr_int
);
715 /* mark faulting descriptor as complete */
716 *chan
->completion
= desc
->txd
.phys
;
718 spin_lock_bh(&ioat
->prep_lock
);
719 ioat3_restart_channel(ioat
);
720 spin_unlock_bh(&ioat
->prep_lock
);
723 static void check_active(struct ioat2_dma_chan
*ioat
)
725 struct ioat_chan_common
*chan
= &ioat
->base
;
727 if (ioat2_ring_active(ioat
)) {
728 mod_timer(&chan
->timer
, jiffies
+ COMPLETION_TIMEOUT
);
732 if (test_and_clear_bit(IOAT_CHAN_ACTIVE
, &chan
->state
))
733 mod_timer(&chan
->timer
, jiffies
+ IDLE_TIMEOUT
);
734 else if (ioat
->alloc_order
> ioat_get_alloc_order()) {
735 /* if the ring is idle, empty, and oversized try to step
738 reshape_ring(ioat
, ioat
->alloc_order
- 1);
740 /* keep shrinking until we get back to our minimum
743 if (ioat
->alloc_order
> ioat_get_alloc_order())
744 mod_timer(&chan
->timer
, jiffies
+ IDLE_TIMEOUT
);
749 static void ioat3_timer_event(unsigned long data
)
751 struct ioat2_dma_chan
*ioat
= to_ioat2_chan((void *) data
);
752 struct ioat_chan_common
*chan
= &ioat
->base
;
753 dma_addr_t phys_complete
;
756 status
= ioat_chansts(chan
);
758 /* when halted due to errors check for channel
759 * programming errors before advancing the completion state
761 if (is_ioat_halted(status
)) {
764 chanerr
= readl(chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
765 dev_err(to_dev(chan
), "%s: Channel halted (%x)\n",
767 if (test_bit(IOAT_RUN
, &chan
->state
))
768 BUG_ON(is_ioat_bug(chanerr
));
769 else /* we never got off the ground */
773 /* if we haven't made progress and we have already
774 * acknowledged a pending completion once, then be more
775 * forceful with a restart
777 spin_lock_bh(&chan
->cleanup_lock
);
778 if (ioat_cleanup_preamble(chan
, &phys_complete
))
779 __cleanup(ioat
, phys_complete
);
780 else if (test_bit(IOAT_COMPLETION_ACK
, &chan
->state
)) {
781 spin_lock_bh(&ioat
->prep_lock
);
782 ioat3_restart_channel(ioat
);
783 spin_unlock_bh(&ioat
->prep_lock
);
784 spin_unlock_bh(&chan
->cleanup_lock
);
787 set_bit(IOAT_COMPLETION_ACK
, &chan
->state
);
788 mod_timer(&chan
->timer
, jiffies
+ COMPLETION_TIMEOUT
);
792 if (ioat2_ring_active(ioat
))
793 mod_timer(&chan
->timer
, jiffies
+ COMPLETION_TIMEOUT
);
795 spin_lock_bh(&ioat
->prep_lock
);
797 spin_unlock_bh(&ioat
->prep_lock
);
799 spin_unlock_bh(&chan
->cleanup_lock
);
802 static enum dma_status
803 ioat3_tx_status(struct dma_chan
*c
, dma_cookie_t cookie
,
804 struct dma_tx_state
*txstate
)
806 struct ioat2_dma_chan
*ioat
= to_ioat2_chan(c
);
809 ret
= dma_cookie_status(c
, cookie
, txstate
);
810 if (ret
== DMA_SUCCESS
)
815 return dma_cookie_status(c
, cookie
, txstate
);
818 static struct dma_async_tx_descriptor
*
819 __ioat3_prep_xor_lock(struct dma_chan
*c
, enum sum_check_flags
*result
,
820 dma_addr_t dest
, dma_addr_t
*src
, unsigned int src_cnt
,
821 size_t len
, unsigned long flags
)
823 struct ioat2_dma_chan
*ioat
= to_ioat2_chan(c
);
824 struct ioat_ring_ent
*compl_desc
;
825 struct ioat_ring_ent
*desc
;
826 struct ioat_ring_ent
*ext
;
827 size_t total_len
= len
;
828 struct ioat_xor_descriptor
*xor;
829 struct ioat_xor_ext_descriptor
*xor_ex
= NULL
;
830 struct ioat_dma_descriptor
*hw
;
831 int num_descs
, with_ext
, idx
, i
;
833 u8 op
= result
? IOAT_OP_XOR_VAL
: IOAT_OP_XOR
;
837 num_descs
= ioat2_xferlen_to_descs(ioat
, len
);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
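	/* Editor's note: e.g. an 8-source XOR exceeds the 5 sources that fit
	 * in the base descriptor, so with_ext is set and every transfer
	 * segment consumes a base + extended descriptor pair, doubling
	 * num_descs.
	 */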
	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
852 if (likely(num_descs
) && ioat2_check_space_lock(ioat
, num_descs
+1) == 0)
858 struct ioat_raw_descriptor
*descs
[2];
859 size_t xfer_size
= min_t(size_t, len
, 1 << ioat
->xfercap_log
);
862 desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
865 /* save a branch by unconditionally retrieving the
866 * extended descriptor xor_set_src() knows to not write
867 * to it in the single descriptor case
869 ext
= ioat2_get_ring_ent(ioat
, idx
+ i
+ 1);
870 xor_ex
= ext
->xor_ex
;
872 descs
[0] = (struct ioat_raw_descriptor
*) xor;
873 descs
[1] = (struct ioat_raw_descriptor
*) xor_ex
;
874 for (s
= 0; s
< src_cnt
; s
++)
875 xor_set_src(descs
, src
[s
], offset
, s
);
876 xor->size
= xfer_size
;
877 xor->dst_addr
= dest
+ offset
;
880 xor->ctl_f
.src_cnt
= src_cnt_to_hw(src_cnt
);
884 dump_desc_dbg(ioat
, desc
);
885 } while ((i
+= 1 + with_ext
) < num_descs
);
887 /* last xor descriptor carries the unmap parameters and fence bit */
888 desc
->txd
.flags
= flags
;
889 desc
->len
= total_len
;
891 desc
->result
= result
;
892 xor->ctl_f
.fence
= !!(flags
& DMA_PREP_FENCE
);
894 /* completion descriptor carries interrupt bit */
895 compl_desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
896 compl_desc
->txd
.flags
= flags
& DMA_PREP_INTERRUPT
;
900 hw
->ctl_f
.int_en
= !!(flags
& DMA_PREP_INTERRUPT
);
901 hw
->ctl_f
.compl_write
= 1;
902 hw
->size
= NULL_DESC_BUFFER_SIZE
;
903 dump_desc_dbg(ioat
, compl_desc
);
905 /* we leave the channel locked to ensure in order submission */
906 return &compl_desc
->txd
;
909 static struct dma_async_tx_descriptor
*
910 ioat3_prep_xor(struct dma_chan
*chan
, dma_addr_t dest
, dma_addr_t
*src
,
911 unsigned int src_cnt
, size_t len
, unsigned long flags
)
913 return __ioat3_prep_xor_lock(chan
, NULL
, dest
, src
, src_cnt
, len
, flags
);
916 struct dma_async_tx_descriptor
*
917 ioat3_prep_xor_val(struct dma_chan
*chan
, dma_addr_t
*src
,
918 unsigned int src_cnt
, size_t len
,
919 enum sum_check_flags
*result
, unsigned long flags
)
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;
926 return __ioat3_prep_xor_lock(chan
, result
, src
[0], &src
[1],
927 src_cnt
- 1, len
, flags
);
931 dump_pq_desc_dbg(struct ioat2_dma_chan
*ioat
, struct ioat_ring_ent
*desc
, struct ioat_ring_ent
*ext
)
933 struct device
*dev
= to_dev(&ioat
->base
);
934 struct ioat_pq_descriptor
*pq
= desc
->pq
;
935 struct ioat_pq_ext_descriptor
*pq_ex
= ext
? ext
->pq_ex
: NULL
;
936 struct ioat_raw_descriptor
*descs
[] = { (void *) pq
, (void *) pq_ex
};
937 int src_cnt
= src_cnt_to_sw(pq
->ctl_f
.src_cnt
);
940 dev_dbg(dev
, "desc[%d]: (%#llx->%#llx) flags: %#x"
941 " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
943 desc_id(desc
), (unsigned long long) desc
->txd
.phys
,
944 (unsigned long long) (pq_ex
? pq_ex
->next
: pq
->next
),
945 desc
->txd
.flags
, pq
->size
, pq
->ctl
, pq
->ctl_f
.op
, pq
->ctl_f
.int_en
,
946 pq
->ctl_f
.compl_write
,
947 pq
->ctl_f
.p_disable
? "" : "p", pq
->ctl_f
.q_disable
? "" : "q",
949 for (i
= 0; i
< src_cnt
; i
++)
950 dev_dbg(dev
, "\tsrc[%d]: %#llx coef: %#x\n", i
,
951 (unsigned long long) pq_get_src(descs
, i
), pq
->coef
[i
]);
952 dev_dbg(dev
, "\tP: %#llx\n", pq
->p_addr
);
953 dev_dbg(dev
, "\tQ: %#llx\n", pq
->q_addr
);
954 dev_dbg(dev
, "\tNEXT: %#llx\n", pq
->next
);
957 static void dump_pq16_desc_dbg(struct ioat2_dma_chan
*ioat
,
958 struct ioat_ring_ent
*desc
)
960 struct device
*dev
= to_dev(&ioat
->base
);
961 struct ioat_pq_descriptor
*pq
= desc
->pq
;
962 struct ioat_raw_descriptor
*descs
[] = { (void *)pq
,
965 int src_cnt
= src16_cnt_to_sw(pq
->ctl_f
.src_cnt
);
969 descs
[1] = (void *)desc
->sed
->hw
;
970 descs
[2] = (void *)desc
->sed
->hw
+ 64;
973 dev_dbg(dev
, "desc[%d]: (%#llx->%#llx) flags: %#x"
974 " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
976 desc_id(desc
), (unsigned long long) desc
->txd
.phys
,
977 (unsigned long long) pq
->next
,
978 desc
->txd
.flags
, pq
->size
, pq
->ctl
,
979 pq
->ctl_f
.op
, pq
->ctl_f
.int_en
,
980 pq
->ctl_f
.compl_write
,
981 pq
->ctl_f
.p_disable
? "" : "p", pq
->ctl_f
.q_disable
? "" : "q",
983 for (i
= 0; i
< src_cnt
; i
++) {
984 dev_dbg(dev
, "\tsrc[%d]: %#llx coef: %#x\n", i
,
985 (unsigned long long) pq16_get_src(descs
, i
),
988 dev_dbg(dev
, "\tP: %#llx\n", pq
->p_addr
);
989 dev_dbg(dev
, "\tQ: %#llx\n", pq
->q_addr
);
992 static struct dma_async_tx_descriptor
*
993 __ioat3_prep_pq_lock(struct dma_chan
*c
, enum sum_check_flags
*result
,
994 const dma_addr_t
*dst
, const dma_addr_t
*src
,
995 unsigned int src_cnt
, const unsigned char *scf
,
996 size_t len
, unsigned long flags
)
998 struct ioat2_dma_chan
*ioat
= to_ioat2_chan(c
);
999 struct ioat_chan_common
*chan
= &ioat
->base
;
1000 struct ioatdma_device
*device
= chan
->device
;
1001 struct ioat_ring_ent
*compl_desc
;
1002 struct ioat_ring_ent
*desc
;
1003 struct ioat_ring_ent
*ext
;
1004 size_t total_len
= len
;
1005 struct ioat_pq_descriptor
*pq
;
1006 struct ioat_pq_ext_descriptor
*pq_ex
= NULL
;
1007 struct ioat_dma_descriptor
*hw
;
1009 u8 op
= result
? IOAT_OP_PQ_VAL
: IOAT_OP_PQ
;
1010 int i
, s
, idx
, with_ext
, num_descs
;
1011 int cb32
= (device
->version
< IOAT_VER_3_3
) ? 1 : 0;
1013 dev_dbg(to_dev(chan
), "%s\n", __func__
);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
1017 BUG_ON(src_cnt
+ dmaf_continue(flags
) < 2);
1019 num_descs
= ioat2_xferlen_to_descs(ioat
, len
);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.)
	 */
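	/* Editor's example: a 3-source P+Q continuation (DMA_PREP_CONTINUE
	 * with P enabled) appends the previous P and Q outputs as three
	 * implied sources (P once, Q twice), so 6 hardware sources are
	 * programmed in total and the extended descriptor path is taken.
	 */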
1024 if (src_cnt
+ dmaf_p_disabled_continue(flags
) > 3 ||
1025 (dmaf_continue(flags
) && !dmaf_p_disabled_continue(flags
))) {
	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
1036 if (likely(num_descs
) &&
1037 ioat2_check_space_lock(ioat
, num_descs
+ cb32
) == 0)
1043 struct ioat_raw_descriptor
*descs
[2];
1044 size_t xfer_size
= min_t(size_t, len
, 1 << ioat
->xfercap_log
);
1046 desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
1049 /* save a branch by unconditionally retrieving the
1050 * extended descriptor pq_set_src() knows to not write
1051 * to it in the single descriptor case
1053 ext
= ioat2_get_ring_ent(ioat
, idx
+ i
+ with_ext
);
1056 descs
[0] = (struct ioat_raw_descriptor
*) pq
;
1057 descs
[1] = (struct ioat_raw_descriptor
*) pq_ex
;
1059 for (s
= 0; s
< src_cnt
; s
++)
1060 pq_set_src(descs
, src
[s
], offset
, scf
[s
], s
);
1062 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1063 if (dmaf_p_disabled_continue(flags
))
1064 pq_set_src(descs
, dst
[1], offset
, 1, s
++);
1065 else if (dmaf_continue(flags
)) {
1066 pq_set_src(descs
, dst
[0], offset
, 0, s
++);
1067 pq_set_src(descs
, dst
[1], offset
, 1, s
++);
1068 pq_set_src(descs
, dst
[1], offset
, 0, s
++);
1070 pq
->size
= xfer_size
;
1071 pq
->p_addr
= dst
[0] + offset
;
1072 pq
->q_addr
= dst
[1] + offset
;
1075 /* we turn on descriptor write back error status */
1076 if (device
->cap
& IOAT_CAP_DWBES
)
1077 pq
->ctl_f
.wb_en
= result
? 1 : 0;
1078 pq
->ctl_f
.src_cnt
= src_cnt_to_hw(s
);
1079 pq
->ctl_f
.p_disable
= !!(flags
& DMA_PREP_PQ_DISABLE_P
);
1080 pq
->ctl_f
.q_disable
= !!(flags
& DMA_PREP_PQ_DISABLE_Q
);
1083 offset
+= xfer_size
;
1084 } while ((i
+= 1 + with_ext
) < num_descs
);
1086 /* last pq descriptor carries the unmap parameters and fence bit */
1087 desc
->txd
.flags
= flags
;
1088 desc
->len
= total_len
;
1090 desc
->result
= result
;
1091 pq
->ctl_f
.fence
= !!(flags
& DMA_PREP_FENCE
);
1092 dump_pq_desc_dbg(ioat
, desc
, ext
);
1095 pq
->ctl_f
.int_en
= !!(flags
& DMA_PREP_INTERRUPT
);
1096 pq
->ctl_f
.compl_write
= 1;
1099 /* completion descriptor carries interrupt bit */
1100 compl_desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
1101 compl_desc
->txd
.flags
= flags
& DMA_PREP_INTERRUPT
;
1102 hw
= compl_desc
->hw
;
1105 hw
->ctl_f
.int_en
= !!(flags
& DMA_PREP_INTERRUPT
);
1106 hw
->ctl_f
.compl_write
= 1;
1107 hw
->size
= NULL_DESC_BUFFER_SIZE
;
1108 dump_desc_dbg(ioat
, compl_desc
);
1112 /* we leave the channel locked to ensure in order submission */
1113 return &compl_desc
->txd
;
1116 static struct dma_async_tx_descriptor
*
1117 __ioat3_prep_pq16_lock(struct dma_chan
*c
, enum sum_check_flags
*result
,
1118 const dma_addr_t
*dst
, const dma_addr_t
*src
,
1119 unsigned int src_cnt
, const unsigned char *scf
,
1120 size_t len
, unsigned long flags
)
1122 struct ioat2_dma_chan
*ioat
= to_ioat2_chan(c
);
1123 struct ioat_chan_common
*chan
= &ioat
->base
;
1124 struct ioatdma_device
*device
= chan
->device
;
1125 struct ioat_ring_ent
*desc
;
1126 size_t total_len
= len
;
1127 struct ioat_pq_descriptor
*pq
;
1130 int i
, s
, idx
, num_descs
;
1132 /* this function only handles src_cnt 9 - 16 */
1133 BUG_ON(src_cnt
< 9);
1135 /* this function is only called with 9-16 sources */
1136 op
= result
? IOAT_OP_PQ_VAL_16S
: IOAT_OP_PQ_16S
;
1138 dev_dbg(to_dev(chan
), "%s\n", __func__
);
1140 num_descs
= ioat2_xferlen_to_descs(ioat
, len
);
	/* 16 source pq is only available on cb3.3 and has no completion
	 * write to the host
	 */
1146 if (num_descs
&& ioat2_check_space_lock(ioat
, num_descs
) == 0)
1154 struct ioat_raw_descriptor
*descs
[4];
1155 size_t xfer_size
= min_t(size_t, len
, 1 << ioat
->xfercap_log
);
1157 desc
= ioat2_get_ring_ent(ioat
, idx
+ i
);
1160 descs
[0] = (struct ioat_raw_descriptor
*) pq
;
1162 desc
->sed
= ioat3_alloc_sed(device
,
1163 sed_get_pq16_pool_idx(src_cnt
));
1165 dev_err(to_dev(chan
),
1166 "%s: no free sed entries\n", __func__
);
1170 pq
->sed_addr
= desc
->sed
->dma
;
1171 desc
->sed
->parent
= desc
;
1173 descs
[1] = (struct ioat_raw_descriptor
*)desc
->sed
->hw
;
1174 descs
[2] = (void *)descs
[1] + 64;
1176 for (s
= 0; s
< src_cnt
; s
++)
1177 pq16_set_src(descs
, src
[s
], offset
, scf
[s
], s
);
1179 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1180 if (dmaf_p_disabled_continue(flags
))
1181 pq16_set_src(descs
, dst
[1], offset
, 1, s
++);
1182 else if (dmaf_continue(flags
)) {
1183 pq16_set_src(descs
, dst
[0], offset
, 0, s
++);
1184 pq16_set_src(descs
, dst
[1], offset
, 1, s
++);
1185 pq16_set_src(descs
, dst
[1], offset
, 0, s
++);
1188 pq
->size
= xfer_size
;
1189 pq
->p_addr
= dst
[0] + offset
;
1190 pq
->q_addr
= dst
[1] + offset
;
1193 pq
->ctl_f
.src_cnt
= src16_cnt_to_hw(s
);
1194 /* we turn on descriptor write back error status */
1195 if (device
->cap
& IOAT_CAP_DWBES
)
1196 pq
->ctl_f
.wb_en
= result
? 1 : 0;
1197 pq
->ctl_f
.p_disable
= !!(flags
& DMA_PREP_PQ_DISABLE_P
);
1198 pq
->ctl_f
.q_disable
= !!(flags
& DMA_PREP_PQ_DISABLE_Q
);
1201 offset
+= xfer_size
;
1202 } while (++i
< num_descs
);
1204 /* last pq descriptor carries the unmap parameters and fence bit */
1205 desc
->txd
.flags
= flags
;
1206 desc
->len
= total_len
;
1208 desc
->result
= result
;
1209 pq
->ctl_f
.fence
= !!(flags
& DMA_PREP_FENCE
);
1211 /* with cb3.3 we should be able to do completion w/o a null desc */
1212 pq
->ctl_f
.int_en
= !!(flags
& DMA_PREP_INTERRUPT
);
1213 pq
->ctl_f
.compl_write
= 1;
1215 dump_pq16_desc_dbg(ioat
, desc
);
1217 /* we leave the channel locked to ensure in order submission */
1221 static struct dma_async_tx_descriptor
*
1222 ioat3_prep_pq(struct dma_chan
*chan
, dma_addr_t
*dst
, dma_addr_t
*src
,
1223 unsigned int src_cnt
, const unsigned char *scf
, size_t len
,
1224 unsigned long flags
)
1226 struct dma_device
*dma
= chan
->device
;
1228 /* specify valid address for disabled result */
1229 if (flags
& DMA_PREP_PQ_DISABLE_P
)
1231 if (flags
& DMA_PREP_PQ_DISABLE_Q
)
	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
1237 if ((flags
& DMA_PREP_PQ_DISABLE_P
) && src_cnt
== 1) {
1238 dma_addr_t single_source
[2];
1239 unsigned char single_source_coef
[2];
1241 BUG_ON(flags
& DMA_PREP_PQ_DISABLE_Q
);
1242 single_source
[0] = src
[0];
1243 single_source
[1] = src
[0];
1244 single_source_coef
[0] = scf
[0];
1245 single_source_coef
[1] = 0;
1247 return (src_cnt
> 8) && (dma
->max_pq
> 8) ?
1248 __ioat3_prep_pq16_lock(chan
, NULL
, dst
, single_source
,
1249 2, single_source_coef
, len
,
1251 __ioat3_prep_pq_lock(chan
, NULL
, dst
, single_source
, 2,
1252 single_source_coef
, len
, flags
);
1255 return (src_cnt
> 8) && (dma
->max_pq
> 8) ?
1256 __ioat3_prep_pq16_lock(chan
, NULL
, dst
, src
, src_cnt
,
1258 __ioat3_prep_pq_lock(chan
, NULL
, dst
, src
, src_cnt
,
1263 struct dma_async_tx_descriptor
*
1264 ioat3_prep_pq_val(struct dma_chan
*chan
, dma_addr_t
*pq
, dma_addr_t
*src
,
1265 unsigned int src_cnt
, const unsigned char *scf
, size_t len
,
1266 enum sum_check_flags
*pqres
, unsigned long flags
)
1268 struct dma_device
*dma
= chan
->device
;
1270 /* specify valid address for disabled result */
1271 if (flags
& DMA_PREP_PQ_DISABLE_P
)
1273 if (flags
& DMA_PREP_PQ_DISABLE_Q
)
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;
1281 return (src_cnt
> 8) && (dma
->max_pq
> 8) ?
1282 __ioat3_prep_pq16_lock(chan
, pqres
, pq
, src
, src_cnt
, scf
, len
,
1284 __ioat3_prep_pq_lock(chan
, pqres
, pq
, src
, src_cnt
, scf
, len
,
1288 static struct dma_async_tx_descriptor
*
1289 ioat3_prep_pqxor(struct dma_chan
*chan
, dma_addr_t dst
, dma_addr_t
*src
,
1290 unsigned int src_cnt
, size_t len
, unsigned long flags
)
1292 struct dma_device
*dma
= chan
->device
;
1293 unsigned char scf
[src_cnt
];
1296 memset(scf
, 0, src_cnt
);
1298 flags
|= DMA_PREP_PQ_DISABLE_Q
;
1299 pq
[1] = dst
; /* specify valid address for disabled result */
1301 return (src_cnt
> 8) && (dma
->max_pq
> 8) ?
1302 __ioat3_prep_pq16_lock(chan
, NULL
, pq
, src
, src_cnt
, scf
, len
,
1304 __ioat3_prep_pq_lock(chan
, NULL
, pq
, src
, src_cnt
, scf
, len
,
1308 struct dma_async_tx_descriptor
*
1309 ioat3_prep_pqxor_val(struct dma_chan
*chan
, dma_addr_t
*src
,
1310 unsigned int src_cnt
, size_t len
,
1311 enum sum_check_flags
*result
, unsigned long flags
)
1313 struct dma_device
*dma
= chan
->device
;
1314 unsigned char scf
[src_cnt
];
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;
1322 memset(scf
, 0, src_cnt
);
1324 flags
|= DMA_PREP_PQ_DISABLE_Q
;
1325 pq
[1] = pq
[0]; /* specify valid address for disabled result */
1328 return (src_cnt
> 8) && (dma
->max_pq
> 8) ?
1329 __ioat3_prep_pq16_lock(chan
, result
, pq
, &src
[1], src_cnt
- 1,
1331 __ioat3_prep_pq_lock(chan
, result
, pq
, &src
[1], src_cnt
- 1,
1335 static struct dma_async_tx_descriptor
*
1336 ioat3_prep_interrupt_lock(struct dma_chan
*c
, unsigned long flags
)
1338 struct ioat2_dma_chan
*ioat
= to_ioat2_chan(c
);
1339 struct ioat_ring_ent
*desc
;
1340 struct ioat_dma_descriptor
*hw
;
1342 if (ioat2_check_space_lock(ioat
, 1) == 0)
1343 desc
= ioat2_get_ring_ent(ioat
, ioat
->head
);
1350 hw
->ctl_f
.int_en
= 1;
1351 hw
->ctl_f
.fence
= !!(flags
& DMA_PREP_FENCE
);
1352 hw
->ctl_f
.compl_write
= 1;
1353 hw
->size
= NULL_DESC_BUFFER_SIZE
;
1357 desc
->txd
.flags
= flags
;
1360 dump_desc_dbg(ioat
, desc
);
1362 /* we leave the channel locked to ensure in order submission */
1366 static void ioat3_dma_test_callback(void *dma_async_param
)
1368 struct completion
*cmp
= dma_async_param
;
1373 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
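/*
 * Editor's sketch of the expected result (follows from the fill loop in
 * ioat_xor_val_self_test() below): source page i is filled with the byte
 * (1 << i), so XOR-ing IOAT_NUM_SRC_TEST == 6 sources gives
 *
 *	cmp_byte = 0x01 ^ 0x02 ^ 0x04 ^ 0x08 ^ 0x10 ^ 0x20 = 0x3f
 *	cmp_word = 0x3f3f3f3f
 *
 * which is what every 32-bit word of the destination page is compared
 * against after the XOR completes.
 */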
1374 static int ioat_xor_val_self_test(struct ioatdma_device
*device
)
1378 struct page
*xor_srcs
[IOAT_NUM_SRC_TEST
];
1379 struct page
*xor_val_srcs
[IOAT_NUM_SRC_TEST
+ 1];
1380 dma_addr_t dma_srcs
[IOAT_NUM_SRC_TEST
+ 1];
1381 dma_addr_t dest_dma
;
1382 struct dma_async_tx_descriptor
*tx
;
1383 struct dma_chan
*dma_chan
;
1384 dma_cookie_t cookie
;
1389 struct completion cmp
;
1391 struct device
*dev
= &device
->pdev
->dev
;
1392 struct dma_device
*dma
= &device
->common
;
1395 dev_dbg(dev
, "%s\n", __func__
);
1397 if (!dma_has_cap(DMA_XOR
, dma
->cap_mask
))
1400 for (src_idx
= 0; src_idx
< IOAT_NUM_SRC_TEST
; src_idx
++) {
1401 xor_srcs
[src_idx
] = alloc_page(GFP_KERNEL
);
1402 if (!xor_srcs
[src_idx
]) {
1404 __free_page(xor_srcs
[src_idx
]);
1409 dest
= alloc_page(GFP_KERNEL
);
1412 __free_page(xor_srcs
[src_idx
]);
1416 /* Fill in src buffers */
1417 for (src_idx
= 0; src_idx
< IOAT_NUM_SRC_TEST
; src_idx
++) {
1418 u8
*ptr
= page_address(xor_srcs
[src_idx
]);
1419 for (i
= 0; i
< PAGE_SIZE
; i
++)
1420 ptr
[i
] = (1 << src_idx
);
1423 for (src_idx
= 0; src_idx
< IOAT_NUM_SRC_TEST
; src_idx
++)
1424 cmp_byte
^= (u8
) (1 << src_idx
);
1426 cmp_word
= (cmp_byte
<< 24) | (cmp_byte
<< 16) |
1427 (cmp_byte
<< 8) | cmp_byte
;
1429 memset(page_address(dest
), 0, PAGE_SIZE
);
1431 dma_chan
= container_of(dma
->channels
.next
, struct dma_chan
,
1433 if (dma
->device_alloc_chan_resources(dma_chan
) < 1) {
1441 dest_dma
= dma_map_page(dev
, dest
, 0, PAGE_SIZE
, DMA_FROM_DEVICE
);
1442 for (i
= 0; i
< IOAT_NUM_SRC_TEST
; i
++)
1443 dma_srcs
[i
] = dma_map_page(dev
, xor_srcs
[i
], 0, PAGE_SIZE
,
1445 tx
= dma
->device_prep_dma_xor(dma_chan
, dest_dma
, dma_srcs
,
1446 IOAT_NUM_SRC_TEST
, PAGE_SIZE
,
1447 DMA_PREP_INTERRUPT
|
1448 DMA_COMPL_SKIP_SRC_UNMAP
|
1449 DMA_COMPL_SKIP_DEST_UNMAP
);
1452 dev_err(dev
, "Self-test xor prep failed\n");
1458 init_completion(&cmp
);
1459 tx
->callback
= ioat3_dma_test_callback
;
1460 tx
->callback_param
= &cmp
;
1461 cookie
= tx
->tx_submit(tx
);
1463 dev_err(dev
, "Self-test xor setup failed\n");
1467 dma
->device_issue_pending(dma_chan
);
1469 tmo
= wait_for_completion_timeout(&cmp
, msecs_to_jiffies(3000));
1471 if (dma
->device_tx_status(dma_chan
, cookie
, NULL
) != DMA_SUCCESS
) {
1472 dev_err(dev
, "Self-test xor timed out\n");
1477 dma_unmap_page(dev
, dest_dma
, PAGE_SIZE
, DMA_FROM_DEVICE
);
1478 for (i
= 0; i
< IOAT_NUM_SRC_TEST
; i
++)
1479 dma_unmap_page(dev
, dma_srcs
[i
], PAGE_SIZE
, DMA_TO_DEVICE
);
1481 dma_sync_single_for_cpu(dev
, dest_dma
, PAGE_SIZE
, DMA_FROM_DEVICE
);
1482 for (i
= 0; i
< (PAGE_SIZE
/ sizeof(u32
)); i
++) {
1483 u32
*ptr
= page_address(dest
);
1484 if (ptr
[i
] != cmp_word
) {
1485 dev_err(dev
, "Self-test xor failed compare\n");
1487 goto free_resources
;
1490 dma_sync_single_for_device(dev
, dest_dma
, PAGE_SIZE
, DMA_FROM_DEVICE
);
1492 /* skip validate if the capability is not present */
1493 if (!dma_has_cap(DMA_XOR_VAL
, dma_chan
->device
->cap_mask
))
1494 goto free_resources
;
1496 op
= IOAT_OP_XOR_VAL
;
	/* validate the sources with the destination page */
1499 for (i
= 0; i
< IOAT_NUM_SRC_TEST
; i
++)
1500 xor_val_srcs
[i
] = xor_srcs
[i
];
1501 xor_val_srcs
[i
] = dest
;
1505 for (i
= 0; i
< IOAT_NUM_SRC_TEST
+ 1; i
++)
1506 dma_srcs
[i
] = dma_map_page(dev
, xor_val_srcs
[i
], 0, PAGE_SIZE
,
1508 tx
= dma
->device_prep_dma_xor_val(dma_chan
, dma_srcs
,
1509 IOAT_NUM_SRC_TEST
+ 1, PAGE_SIZE
,
1510 &xor_val_result
, DMA_PREP_INTERRUPT
|
1511 DMA_COMPL_SKIP_SRC_UNMAP
|
1512 DMA_COMPL_SKIP_DEST_UNMAP
);
1514 dev_err(dev
, "Self-test zero prep failed\n");
1520 init_completion(&cmp
);
1521 tx
->callback
= ioat3_dma_test_callback
;
1522 tx
->callback_param
= &cmp
;
1523 cookie
= tx
->tx_submit(tx
);
1525 dev_err(dev
, "Self-test zero setup failed\n");
1529 dma
->device_issue_pending(dma_chan
);
1531 tmo
= wait_for_completion_timeout(&cmp
, msecs_to_jiffies(3000));
1533 if (dma
->device_tx_status(dma_chan
, cookie
, NULL
) != DMA_SUCCESS
) {
1534 dev_err(dev
, "Self-test validate timed out\n");
1539 for (i
= 0; i
< IOAT_NUM_SRC_TEST
+ 1; i
++)
1540 dma_unmap_page(dev
, dma_srcs
[i
], PAGE_SIZE
, DMA_TO_DEVICE
);
1542 if (xor_val_result
!= 0) {
1543 dev_err(dev
, "Self-test validate failed compare\n");
1545 goto free_resources
;
1548 /* test for non-zero parity sum */
1549 op
= IOAT_OP_XOR_VAL
;
1552 for (i
= 0; i
< IOAT_NUM_SRC_TEST
+ 1; i
++)
1553 dma_srcs
[i
] = dma_map_page(dev
, xor_val_srcs
[i
], 0, PAGE_SIZE
,
1555 tx
= dma
->device_prep_dma_xor_val(dma_chan
, dma_srcs
,
1556 IOAT_NUM_SRC_TEST
+ 1, PAGE_SIZE
,
1557 &xor_val_result
, DMA_PREP_INTERRUPT
|
1558 DMA_COMPL_SKIP_SRC_UNMAP
|
1559 DMA_COMPL_SKIP_DEST_UNMAP
);
1561 dev_err(dev
, "Self-test 2nd zero prep failed\n");
1567 init_completion(&cmp
);
1568 tx
->callback
= ioat3_dma_test_callback
;
1569 tx
->callback_param
= &cmp
;
1570 cookie
= tx
->tx_submit(tx
);
1572 dev_err(dev
, "Self-test 2nd zero setup failed\n");
1576 dma
->device_issue_pending(dma_chan
);
1578 tmo
= wait_for_completion_timeout(&cmp
, msecs_to_jiffies(3000));
1580 if (dma
->device_tx_status(dma_chan
, cookie
, NULL
) != DMA_SUCCESS
) {
1581 dev_err(dev
, "Self-test 2nd validate timed out\n");
1586 if (xor_val_result
!= SUM_CHECK_P_RESULT
) {
1587 dev_err(dev
, "Self-test validate failed compare\n");
1592 for (i
= 0; i
< IOAT_NUM_SRC_TEST
+ 1; i
++)
1593 dma_unmap_page(dev
, dma_srcs
[i
], PAGE_SIZE
, DMA_TO_DEVICE
);
1595 goto free_resources
;
1597 if (op
== IOAT_OP_XOR
) {
1598 dma_unmap_page(dev
, dest_dma
, PAGE_SIZE
, DMA_FROM_DEVICE
);
1599 for (i
= 0; i
< IOAT_NUM_SRC_TEST
; i
++)
1600 dma_unmap_page(dev
, dma_srcs
[i
], PAGE_SIZE
,
1602 } else if (op
== IOAT_OP_XOR_VAL
) {
1603 for (i
= 0; i
< IOAT_NUM_SRC_TEST
+ 1; i
++)
1604 dma_unmap_page(dev
, dma_srcs
[i
], PAGE_SIZE
,
1608 dma
->device_free_chan_resources(dma_chan
);
1610 src_idx
= IOAT_NUM_SRC_TEST
;
1612 __free_page(xor_srcs
[src_idx
]);
1617 static int ioat3_dma_self_test(struct ioatdma_device
*device
)
1619 int rc
= ioat_dma_self_test(device
);
1624 rc
= ioat_xor_val_self_test(device
);
1631 static int ioat3_irq_reinit(struct ioatdma_device
*device
)
1633 int msixcnt
= device
->common
.chancnt
;
1634 struct pci_dev
*pdev
= device
->pdev
;
1636 struct msix_entry
*msix
;
1637 struct ioat_chan_common
*chan
;
1640 switch (device
->irq_mode
) {
1643 for (i
= 0; i
< msixcnt
; i
++) {
1644 msix
= &device
->msix_entries
[i
];
1645 chan
= ioat_chan_by_index(device
, i
);
1646 devm_free_irq(&pdev
->dev
, msix
->vector
, chan
);
1649 pci_disable_msix(pdev
);
1652 case IOAT_MSIX_SINGLE
:
1653 msix
= &device
->msix_entries
[0];
1654 chan
= ioat_chan_by_index(device
, 0);
1655 devm_free_irq(&pdev
->dev
, msix
->vector
, chan
);
1656 pci_disable_msix(pdev
);
1660 chan
= ioat_chan_by_index(device
, 0);
1661 devm_free_irq(&pdev
->dev
, pdev
->irq
, chan
);
1662 pci_disable_msi(pdev
);
1666 chan
= ioat_chan_by_index(device
, 0);
1667 devm_free_irq(&pdev
->dev
, pdev
->irq
, chan
);
1674 device
->irq_mode
= IOAT_NOIRQ
;
1676 err
= ioat_dma_setup_interrupts(device
);
1681 static int ioat3_reset_hw(struct ioat_chan_common
*chan
)
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
1686 struct ioatdma_device
*device
= chan
->device
;
1687 struct pci_dev
*pdev
= device
->pdev
;
1692 ioat2_quiesce(chan
, msecs_to_jiffies(100));
1694 chanerr
= readl(chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
1695 writel(chanerr
, chan
->reg_base
+ IOAT_CHANERR_OFFSET
);
1697 if (device
->version
< IOAT_VER_3_3
) {
1698 /* clear any pending errors */
1699 err
= pci_read_config_dword(pdev
,
1700 IOAT_PCI_CHANERR_INT_OFFSET
, &chanerr
);
1703 "channel error register unreachable\n");
1706 pci_write_config_dword(pdev
,
1707 IOAT_PCI_CHANERR_INT_OFFSET
, chanerr
);
		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
1712 pci_read_config_word(pdev
, IOAT_PCI_DEVICE_ID_OFFSET
, &dev_id
);
1713 if (dev_id
== PCI_DEVICE_ID_INTEL_IOAT_TBG0
) {
1714 pci_write_config_dword(pdev
,
1715 IOAT_PCI_DMAUNCERRSTS_OFFSET
,
1720 err
= ioat2_reset_sync(chan
, msecs_to_jiffies(200));
1722 dev_err(&pdev
->dev
, "Failed to reset!\n");
1726 if (device
->irq_mode
!= IOAT_NOIRQ
&& is_bwd_ioat(pdev
))
1727 err
= ioat3_irq_reinit(device
);
1732 static void ioat3_intr_quirk(struct ioatdma_device
*device
)
1734 struct dma_device
*dma
;
1736 struct ioat_chan_common
*chan
;
1739 dma
= &device
->common
;
1742 * if we have descriptor write back error status, we mask the
1745 if (device
->cap
& IOAT_CAP_DWBES
) {
1746 list_for_each_entry(c
, &dma
->channels
, device_node
) {
1747 chan
= to_chan_common(c
);
1748 errmask
= readl(chan
->reg_base
+
1749 IOAT_CHANERR_MASK_OFFSET
);
1750 errmask
|= IOAT_CHANERR_XOR_P_OR_CRC_ERR
|
1751 IOAT_CHANERR_XOR_Q_ERR
;
1752 writel(errmask
, chan
->reg_base
+
1753 IOAT_CHANERR_MASK_OFFSET
);
1758 int ioat3_dma_probe(struct ioatdma_device
*device
, int dca
)
1760 struct pci_dev
*pdev
= device
->pdev
;
1761 int dca_en
= system_has_dca_enabled(pdev
);
1762 struct dma_device
*dma
;
1764 struct ioat_chan_common
*chan
;
1765 bool is_raid_device
= false;
1768 device
->enumerate_channels
= ioat2_enumerate_channels
;
1769 device
->reset_hw
= ioat3_reset_hw
;
1770 device
->self_test
= ioat3_dma_self_test
;
1771 device
->intr_quirk
= ioat3_intr_quirk
;
1772 dma
= &device
->common
;
1773 dma
->device_prep_dma_memcpy
= ioat2_dma_prep_memcpy_lock
;
1774 dma
->device_issue_pending
= ioat2_issue_pending
;
1775 dma
->device_alloc_chan_resources
= ioat2_alloc_chan_resources
;
1776 dma
->device_free_chan_resources
= ioat2_free_chan_resources
;
1778 if (is_xeon_cb32(pdev
))
1779 dma
->copy_align
= 6;
1781 dma_cap_set(DMA_INTERRUPT
, dma
->cap_mask
);
1782 dma
->device_prep_dma_interrupt
= ioat3_prep_interrupt_lock
;
1784 device
->cap
= readl(device
->reg_base
+ IOAT_DMA_CAP_OFFSET
);
1786 if (is_bwd_noraid(pdev
))
1787 device
->cap
&= ~(IOAT_CAP_XOR
| IOAT_CAP_PQ
| IOAT_CAP_RAID16SS
);
1789 /* dca is incompatible with raid operations */
1790 if (dca_en
&& (device
->cap
& (IOAT_CAP_XOR
|IOAT_CAP_PQ
)))
1791 device
->cap
&= ~(IOAT_CAP_XOR
|IOAT_CAP_PQ
);
1793 if (device
->cap
& IOAT_CAP_XOR
) {
1794 is_raid_device
= true;
1798 dma_cap_set(DMA_XOR
, dma
->cap_mask
);
1799 dma
->device_prep_dma_xor
= ioat3_prep_xor
;
1801 dma_cap_set(DMA_XOR_VAL
, dma
->cap_mask
);
1802 dma
->device_prep_dma_xor_val
= ioat3_prep_xor_val
;
1805 if (device
->cap
& IOAT_CAP_PQ
) {
1806 is_raid_device
= true;
1808 dma
->device_prep_dma_pq
= ioat3_prep_pq
;
1809 dma
->device_prep_dma_pq_val
= ioat3_prep_pq_val
;
1810 dma_cap_set(DMA_PQ
, dma
->cap_mask
);
1811 dma_cap_set(DMA_PQ_VAL
, dma
->cap_mask
);
1813 if (device
->cap
& IOAT_CAP_RAID16SS
) {
1814 dma_set_maxpq(dma
, 16, 0);
1817 dma_set_maxpq(dma
, 8, 0);
1818 if (is_xeon_cb32(pdev
))
1824 if (!(device
->cap
& IOAT_CAP_XOR
)) {
1825 dma
->device_prep_dma_xor
= ioat3_prep_pqxor
;
1826 dma
->device_prep_dma_xor_val
= ioat3_prep_pqxor_val
;
1827 dma_cap_set(DMA_XOR
, dma
->cap_mask
);
1828 dma_cap_set(DMA_XOR_VAL
, dma
->cap_mask
);
1830 if (device
->cap
& IOAT_CAP_RAID16SS
) {
1835 if (is_xeon_cb32(pdev
))
1843 dma
->device_tx_status
= ioat3_tx_status
;
1844 device
->cleanup_fn
= ioat3_cleanup_event
;
1845 device
->timer_fn
= ioat3_timer_event
;
1847 if (is_xeon_cb32(pdev
)) {
1848 dma_cap_clear(DMA_XOR_VAL
, dma
->cap_mask
);
1849 dma
->device_prep_dma_xor_val
= NULL
;
1851 dma_cap_clear(DMA_PQ_VAL
, dma
->cap_mask
);
1852 dma
->device_prep_dma_pq_val
= NULL
;
1855 /* starting with CB3.3 super extended descriptors are supported */
1856 if (device
->cap
& IOAT_CAP_RAID16SS
) {
1860 /* allocate sw descriptor pool for SED */
1861 device
->sed_pool
= kmem_cache_create("ioat_sed",
1862 sizeof(struct ioat_sed_ent
), 0, 0, NULL
);
1863 if (!device
->sed_pool
)
1866 for (i
= 0; i
< MAX_SED_POOLS
; i
++) {
1867 snprintf(pool_name
, 14, "ioat_hw%d_sed", i
);
1869 /* allocate SED DMA pool */
1870 device
->sed_hw_pool
[i
] = dma_pool_create(pool_name
,
1872 SED_SIZE
* (i
+ 1), 64, 0);
1873 if (!device
->sed_hw_pool
[i
])
1874 goto sed_pool_cleanup
;
1879 err
= ioat_probe(device
);
1882 ioat_set_tcp_copy_break(262144);
1884 list_for_each_entry(c
, &dma
->channels
, device_node
) {
1885 chan
= to_chan_common(c
);
1886 writel(IOAT_DMA_DCA_ANY_CPU
,
1887 chan
->reg_base
+ IOAT_DCACTRL_OFFSET
);
1890 err
= ioat_register(device
);
1894 ioat_kobject_add(device
, &ioat2_ktype
);
1897 device
->dca
= ioat3_dca_init(pdev
, device
->reg_base
);
1902 if (device
->sed_pool
) {
1904 kmem_cache_destroy(device
->sed_pool
);
1906 for (i
= 0; i
< MAX_SED_POOLS
; i
++)
1907 if (device
->sed_hw_pool
[i
])
1908 dma_pool_destroy(device
->sed_hw_pool
[i
]);
1914 void ioat3_dma_remove(struct ioatdma_device
*device
)
1916 if (device
->sed_pool
) {
1918 kmem_cache_destroy(device
->sed_pool
);
1920 for (i
= 0; i
< MAX_SED_POOLS
; i
++)
1921 if (device
->sed_hw_pool
[i
])
1922 dma_pool_destroy(device
->sed_hw_pool
[i
]);