/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
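/*
 * How the high-water mark is used (a sketch, not part of the original
 * comments): tx_submit only writes the channel doorbell once
 * ioat_chan->pending reaches ioat_pending_level; anything queued below
 * the mark stays in memory until the client invokes the generic
 * device_issue_pending() hook to flush it explicitly.
 */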
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
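/*
 * Note on the single-vector path (sketch): each set bit in ATTNSTATUS
 * identifies a channel with work pending, and bit i corresponds to
 * device->idx[i] as filled in by ioat_dma_enumerate_channels(), so one
 * interrupt fans out to the per-channel cleanup tasklets.
 */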
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);
/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}
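/*
 * Channel register layout assumed above (sketch): the device exposes one
 * 0x80-byte register bank per channel immediately following the global
 * registers, hence reg_base + 0x80 * (i + 1) for channel i.
 */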
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
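/*
 * The two flush mechanisms differ by hardware version (sketch): IOAT v1
 * is kicked with an APPEND command byte telling it to re-read the
 * descriptor chain, while IOAT v2 is told the new total descriptor count
 * through the DMACOUNT register; both reset the software pending counter.
 */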
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback into the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	__list_splice(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
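/*
 * Typical client flow against the tx_submit above (a sketch mirroring
 * ioat_dma_self_test() later in this file; error handling omitted):
 *
 *	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
 *	tx->callback = done_fn;		(optional; runs during cleanup)
 *	tx->callback_param = done_arg;
 *	cookie = tx->tx_submit(tx);	(chains descriptors, assigns cookie)
 *	dev->device_issue_pending(chan);	(forces the doorbell write)
 *	then poll dev->device_is_tx_complete(chan, cookie, NULL, NULL)
 */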
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;

		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback into the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");
/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}
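/*
 * Resulting topology (sketch): after massaging, every node on the ring is
 * a real descriptor (the free_desc list_head itself is unlinked but still
 * points into the ring), and each hw descriptor's next field holds the bus
 * address of its successor, so the hardware can walk the ring without any
 * further driver writes:
 *
 *	desc0.hw->next -> desc1.hw -> ... -> descN.hw -> desc0.hw
 */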
/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
	return ioat_chan->desccount;
}
/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
}
/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * is out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	prefetch(new->hw);
	return new;
}
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		noop_desc->hw->size = 0;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	prefetch(new);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	prefetch(new->hw);
	return new;
}
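/*
 * Worked example of the invariants above (hypothetical ring A-B-C-D): if
 * used.prev = &A and used.next = &D, then used.next == used.prev->prev,
 * i.e. D is the only free node left; it gets burned as the noop above
 * rather than reused, since the hardware has likely already read D's
 * address out of C's next pointer.
 */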
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else
		return NULL;
}
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		/* no descriptor: drop the lock taken above before bailing */
		spin_unlock_bh(&ioat_chan->desc_lock);
		return NULL;
	}
}
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	/*
	 * yes we are unmapping both _page and _single
	 * alloc'd regions with unmap_page. Is this
	 * *really* that bad?
	 */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, dst),
				pci_unmap_len(desc, len),
				PCI_DMA_FROMDEVICE);

	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, src),
				pci_unmap_len(desc, len),
				PCI_DMA_TODEVICE);
}
/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	cookie = 0;
	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;
				ioat_dma_unmap(ioat_chan, desc);
				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (async_tx_test_ack(&desc->async_tx)) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				    & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;
					ioat_dma_unmap(ioat_chan, desc);
					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
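/*
 * Cleanup cost note (sketch): caching last_completion lets the tasklet
 * bail out after a single completion-area read when nothing new has
 * finished since the previous pass, and the cleanup_lock trylock keeps
 * concurrent callers (tasklet vs. is_complete poll) from stacking up
 * behind one another.
 */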
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	desc->hw->size = 0;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	async_tx_ack(&desc->async_tx);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}
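/*
 * Why a NULL descriptor first (sketch): CHAINADDR must be primed with a
 * valid descriptor before anything can be appended, so a no-op descriptor
 * (NUL control bit, zero size) hands the ring to the device and produces
 * a first completion write/interrupt without moving any data.
 */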
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
		dma_async_param);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
				  DMA_FROM_DEVICE);
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
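/*
 * Example (illustrative): the style can be chosen at module load time,
 * e.g. "modprobe ioatdma ioat_interrupt_style=msi" to force plain MSI.
 */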
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	return -EINVAL;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}
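/*
 * Fallback ladder implemented by the gotos above: msix (per-channel
 * vectors) -> msix-single-vector -> msi -> intx; a later mode is tried
 * whenever an earlier one cannot be enabled, and only if all of them
 * fail is interrupt generation disabled via err_no_irq.
 */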
/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}
);