/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

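/* __ioat2_start_null_desc - kick the channel with a no-op descriptor
 *
 * Writes a NULL descriptor at the current head and issues it so the
 * engine begins fetching from the ring.  Caller must hold prep_lock.
 */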
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

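/* __cleanup - reclaim completed descriptors
 *
 * Walks the ring from tail towards head, completing cookies and running
 * client callbacks, until the descriptor whose physical address matches
 * the completion writeback (phys_complete) has been processed.  Called
 * with cleanup_lock held.
 */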
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_descriptor_unmap(tx);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat2_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

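/* __ioat2_restart_chan - resume a quiesced channel
 *
 * Re-issues everything from the current tail onwards; if nothing is
 * pending, a NULL descriptor is started instead so the engine is left in
 * a known state.
 */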
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

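/* ioat2_timer_event - per-channel watchdog
 *
 * Checks for a halted channel (fatal programming errors), forces cleanup
 * or a restart when no forward progress has been made, and re-arms the
 * completion/idle timers as appropriate.
 */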
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat2_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

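/* ioat2_tx_submit_unlock - publish a prepared descriptor chain
 *
 * Counterpart to ioat2_check_space_lock(): advances the ring head by the
 * number of descriptors reserved (ioat->produce) and drops prep_lock,
 * which was taken when space was checked.
 */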
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

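/* ioat2_alloc_ring_ent - allocate one ring entry
 *
 * The hardware descriptor comes from the device's DMA-coherent pool and
 * the software wrapper from the ioat2 kmem cache.
 */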
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

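/* ioat2_alloc_ring - allocate a software ring of 2^order entries
 *
 * The hardware descriptors are chained into a circle via their 'next'
 * pointers so the engine can follow the ring without host intervention.
 */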
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	set_bit(IOAT_RUN, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(chan->timer.expires)
	    && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

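/* ioat2_dma_prep_memcpy_lock - prepare a memcpy operation
 *
 * Splits the copy into descriptors of at most 1 << xfercap_log bytes;
 * interrupt/fence flags and the completion write are set only on the
 * last descriptor.  On success the channel is returned with prep_lock
 * still held until tx_submit releases it.
 */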
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	ioat_stop(chan);
	device->reset_hw(chan);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

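/* ioat2_dma_probe - wire up ioat2 operations and register the device
 *
 * Installs the version 2 channel callbacks, enables DCA completion
 * writes on each channel, registers with the dmaengine core, and adds
 * the sysfs attributes; DCA is initialized when requested by the caller.
 */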
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}