/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

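/* Flush any accumulated descriptors to the hardware by writing the new
 * descriptor count to the channel's DMACOUNT register.  Called with
 * prep_lock held.
 */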
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

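/* Post a NULL descriptor to (re)start the channel: it moves no data
 * (src/dst are zero, size is a dummy non-zero value), but it seeds the
 * hardware chain address and requests a completion write.  Called with
 * prep_lock held.
 */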
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

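/* Reclaim finished descriptors: walk the ring from ->tail up to the
 * descriptor whose physical address the hardware reported in the
 * completion writeback area, unmapping buffers and running client
 * callbacks along the way.  Called with cleanup_lock held.
 */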
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat2+ channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

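/* Tasklet body: reap completed descriptors, then rewrite
 * IOAT_CHANCTRL_RUN to the channel control register (keeping the
 * channel armed after servicing the interrupt).
 */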
void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

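/* Restart a quiesced channel: rewind ->issued to ->tail so everything
 * outstanding is re-submitted, then either point the hardware at the
 * first pending descriptor or, if the ring is empty, kick a NULL
 * descriptor.
 */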
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

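/* Suspend the channel and poll until it leaves the active/idle states.
 * A @tmo of zero waits indefinitely; otherwise -ETIMEDOUT is returned
 * if the channel fails to quiesce in time.
 */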
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

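/* Issue a channel reset and spin until the hardware clears its
 * reset-pending status or @tmo expires.
 */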
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

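/* Timer helper, called with prep_lock held: keep the completion timeout
 * armed while descriptors are in flight; once idle, fall back to the
 * idle timeout and opportunistically shrink an oversized ring toward
 * the default allocation order.
 */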
static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

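/* Watchdog: on a halted channel check for programming errors, reap any
 * completions the interrupt path missed, and forcefully restart the
 * channel if no progress has been made since the last acknowledged
 * completion timeout.
 */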
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat2_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

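/* ->tx_submit hook.  Entered with prep_lock held (taken in
 * ioat2_check_space_lock); assigns a cookie, publishes the prepared
 * descriptors to the hardware by advancing ->head, and drops the lock.
 */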
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

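/* Allocate a software ring of 2^order descriptor pointers and link the
 * hardware descriptors into a circular chain (the last entry points
 * back at the first).
 */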
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		set_bit(IOAT_RUN, &chan->state);
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

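/* Grow or shrink the ring to 2^order entries while preserving any
 * descriptors still in flight.  Called with cleanup_lock and prep_lock
 * held.
 */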
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called in a bh-disabled context, so we need to trigger the
	 * timer event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

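/* Prepare a memcpy transaction, splitting @len across as many
 * descriptors as the channel's transfer cap requires.  On success the
 * producer lock taken in ioat2_check_space_lock() is left held; it is
 * released in ioat2_tx_submit_unlock() when the client submits the
 * descriptor, which keeps submissions in order.
 */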
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);
	clear_bit(IOAT_RUN, &chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

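/* sysfs attributes reporting the current ring size and the number of
 * descriptors in flight per channel.
 */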
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

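/* Wire up the ioat2-specific device methods and dmaengine entry points,
 * run the common probe/register path, and optionally set up DCA (direct
 * cache access).
 */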
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}