Commit | Line | Data |
---|---|---|
0bbd5f4e | 1 | /* |
43d6e369 | 2 | * Intel I/OAT DMA Linux driver |
211a22ce | 3 | * Copyright(c) 2004 - 2009 Intel Corporation. |
0bbd5f4e CL |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | |
43d6e369 SN |
6 | * under the terms and conditions of the GNU General Public License, |
7 | * version 2, as published by the Free Software Foundation. | |
0bbd5f4e CL |
8 | * |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along with | |
43d6e369 SN |
15 | * this program; if not, write to the Free Software Foundation, Inc., |
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
17 | * | |
18 | * The full GNU General Public License is included in this distribution in | |
19 | * the file called "COPYING". | |
0bbd5f4e | 20 | * |
0bbd5f4e CL |
21 | */ |
22 | ||
23 | /* | |
24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | |
25 | * copy operations. | |
26 | */ | |
27 | ||
28 | #include <linux/init.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/pci.h> | |
31 | #include <linux/interrupt.h> | |
32 | #include <linux/dmaengine.h> | |
33 | #include <linux/delay.h> | |
6b00c92c | 34 | #include <linux/dma-mapping.h> |
09177e85 | 35 | #include <linux/workqueue.h> |
3ad0b02e | 36 | #include <linux/i7300_idle.h> |
0bbd5f4e | 37 | #include "ioatdma.h" |
0bbd5f4e CL |
38 | #include "ioatdma_registers.h" |
39 | #include "ioatdma_hw.h" | |
40 | ||
41 | #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) | |
8ab89567 | 42 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) |
0bbd5f4e | 43 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) |
7405f74b | 44 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) |
0bbd5f4e | 45 | |
09177e85 | 46 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) |
7bb67c14 SN |
47 | static int ioat_pending_level = 4; |
48 | module_param(ioat_pending_level, int, 0644); | |
49 | MODULE_PARM_DESC(ioat_pending_level, | |
50 | "high-water mark for pushing ioat descriptors (default: 4)"); | |
51 | ||
09177e85 MS |
52 | #define RESET_DELAY msecs_to_jiffies(100) |
53 | #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) | |
54 | static void ioat_dma_chan_reset_part2(struct work_struct *work); | |
55 | static void ioat_dma_chan_watchdog(struct work_struct *work); | |
56 | ||
7f1b358a MS |
57 | /* |
58 | * workaround for IOAT ver.3.0 null descriptor issue | |
59 | * (channel returns error when size is 0) | |
60 | */ | |
61 | #define NULL_DESC_BUFFER_SIZE 1 | |
62 | ||
0bbd5f4e | 63 | /* internal functions */ |
43d6e369 SN |
64 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); |
65 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); | |
7bb67c14 SN |
66 | |
67 | static struct ioat_desc_sw * | |
68 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); | |
7f2b291f | 69 | static struct ioat_desc_sw * |
7bb67c14 | 70 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); |
0bbd5f4e | 71 | |
7f2b291f SN |
72 | static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( |
73 | struct ioatdma_device *device, | |
74 | int index) | |
3e037454 SN |
75 | { |
76 | return device->idx[index]; | |
77 | } | |
78 | ||
79 | /** | |
80 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | |
81 | * @irq: interrupt id | |
82 | * @data: interrupt data | |
83 | */ | |
84 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | |
85 | { | |
86 | struct ioatdma_device *instance = data; | |
87 | struct ioat_dma_chan *ioat_chan; | |
88 | unsigned long attnstatus; | |
89 | int bit; | |
90 | u8 intrctrl; | |
91 | ||
92 | intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); | |
93 | ||
94 | if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) | |
95 | return IRQ_NONE; | |
96 | ||
97 | if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { | |
98 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | |
99 | return IRQ_NONE; | |
100 | } | |
101 | ||
102 | attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); | |
103 | for_each_bit(bit, &attnstatus, BITS_PER_LONG) { | |
104 | ioat_chan = ioat_lookup_chan_by_index(instance, bit); | |
105 | tasklet_schedule(&ioat_chan->cleanup_task); | |
106 | } | |
107 | ||
108 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | |
109 | return IRQ_HANDLED; | |
110 | } | |
111 | ||
112 | /** | |
113 | * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode | |
114 | * @irq: interrupt id | |
115 | * @data: interrupt data | |
116 | */ | |
117 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | |
118 | { | |
119 | struct ioat_dma_chan *ioat_chan = data; | |
120 | ||
121 | tasklet_schedule(&ioat_chan->cleanup_task); | |
122 | ||
123 | return IRQ_HANDLED; | |
124 | } | |
125 | ||
126 | static void ioat_dma_cleanup_tasklet(unsigned long data); | |
127 | ||
128 | /** | |
129 | * ioat_dma_enumerate_channels - find and initialize the device's channels | |
130 | * @device: the device to be enumerated | |
131 | */ | |
8ab89567 | 132 | static int ioat_dma_enumerate_channels(struct ioatdma_device *device) |
0bbd5f4e CL |
133 | { |
134 | u8 xfercap_scale; | |
135 | u32 xfercap; | |
136 | int i; | |
137 | struct ioat_dma_chan *ioat_chan; | |
138 | ||
7f1b358a MS |
139 | /* |
140 | * IOAT ver.3 workarounds | |
141 | */ | |
142 | if (device->version == IOAT_VER_3_0) { | |
143 | u32 chan_err_mask; | |
144 | u16 dev_id; | |
145 | u32 dmauncerrsts; | |
146 | ||
147 | /* | |
148 | * Write CHANERRMSK_INT with 3E07h to mask out the errors | |
149 | * that can cause stability issues for IOAT ver.3 | |
150 | */ | |
151 | chan_err_mask = 0x3E07; | |
152 | pci_write_config_dword(device->pdev, | |
153 | IOAT_PCI_CHANERRMASK_INT_OFFSET, | |
154 | chan_err_mask); | |
155 | ||
156 | /* | |
157 | * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | |
158 | * (workaround for spurious config parity error after restart) | |
159 | */ | |
160 | pci_read_config_word(device->pdev, | |
161 | IOAT_PCI_DEVICE_ID_OFFSET, | |
162 | &dev_id); | |
163 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | |
164 | dmauncerrsts = 0x10; | |
165 | pci_write_config_dword(device->pdev, | |
166 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | |
167 | dmauncerrsts); | |
168 | } | |
169 | } | |
170 | ||
e3828811 CL |
171 | device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); |
172 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | |
0bbd5f4e CL |
173 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); |
174 | ||
f371be63 | 175 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL |
3ad0b02e VP |
176 | if (i7300_idle_platform_probe(NULL, NULL) == 0) { |
177 | device->common.chancnt--; | |
178 | } | |
27471fdb | 179 | #endif |
0bbd5f4e CL |
180 | for (i = 0; i < device->common.chancnt; i++) { |
181 | ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); | |
182 | if (!ioat_chan) { | |
183 | device->common.chancnt = i; | |
184 | break; | |
185 | } | |
186 | ||
187 | ioat_chan->device = device; | |
188 | ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); | |
189 | ioat_chan->xfercap = xfercap; | |
7bb67c14 | 190 | ioat_chan->desccount = 0; |
09177e85 | 191 | INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); |
ea9c717d MS |
192 | if (ioat_chan->device->version == IOAT_VER_2_0) |
193 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | | |
194 | IOAT_DMA_DCA_ANY_CPU, | |
195 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | |
196 | else if (ioat_chan->device->version == IOAT_VER_3_0) | |
197 | writel(IOAT_DMA_DCA_ANY_CPU, | |
198 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | |
0bbd5f4e CL |
199 | spin_lock_init(&ioat_chan->cleanup_lock); |
200 | spin_lock_init(&ioat_chan->desc_lock); | |
201 | INIT_LIST_HEAD(&ioat_chan->free_desc); | |
202 | INIT_LIST_HEAD(&ioat_chan->used_desc); | |
203 | /* This should be made common somewhere in dmaengine.c */ | |
204 | ioat_chan->common.device = &device->common; | |
0bbd5f4e | 205 | list_add_tail(&ioat_chan->common.device_node, |
43d6e369 | 206 | &device->common.channels); |
3e037454 SN |
207 | device->idx[i] = ioat_chan; |
208 | tasklet_init(&ioat_chan->cleanup_task, | |
209 | ioat_dma_cleanup_tasklet, | |
210 | (unsigned long) ioat_chan); | |
211 | tasklet_disable(&ioat_chan->cleanup_task); | |
0bbd5f4e CL |
212 | } |
213 | return device->common.chancnt; | |
214 | } | |
215 | ||
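
The XFERCAP register read inside `ioat_dma_enumerate_channels` above encodes the per-descriptor transfer limit as a power of two: a scale of 0 means effectively unlimited (stored as all ones via `-1`), otherwise the cap is `1 << xfercap_scale` bytes. A minimal sketch of that decoding, and of how many hardware descriptors one copy therefore needs, is below; the `example_*` helper names are illustrative only and not part of the driver (`DIV_ROUND_UP` is the usual kernel.h macro):

```c
/* Illustrative only -- same decoding as ioat_dma_enumerate_channels() above. */
static inline u32 example_decode_xfercap(u8 xfercap_scale)
{
	/* scale 0 => effectively unlimited (all ones), else 2^scale bytes */
	return xfercap_scale == 0 ? (u32)-1 : (1U << xfercap_scale);
}

/* e.g. a 256 KB copy with a 64 KB cap needs four hardware descriptors */
static inline unsigned int example_descs_needed(size_t len, u32 xfercap)
{
	return DIV_ROUND_UP(len, xfercap);
}
```
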
711924b1 SN |
216 | /** |
217 | * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended | |
218 | * descriptors to hw | |
219 | * @chan: DMA channel handle | |
220 | */ | |
7bb67c14 | 221 | static inline void __ioat1_dma_memcpy_issue_pending( |
711924b1 SN |
222 | struct ioat_dma_chan *ioat_chan) |
223 | { | |
224 | ioat_chan->pending = 0; | |
225 | writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); | |
226 | } | |
227 | ||
228 | static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | |
229 | { | |
230 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
231 | ||
09177e85 | 232 | if (ioat_chan->pending > 0) { |
711924b1 SN |
233 | spin_lock_bh(&ioat_chan->desc_lock); |
234 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | |
235 | spin_unlock_bh(&ioat_chan->desc_lock); | |
236 | } | |
237 | } | |
238 | ||
7bb67c14 | 239 | static inline void __ioat2_dma_memcpy_issue_pending( |
711924b1 SN |
240 | struct ioat_dma_chan *ioat_chan) |
241 | { | |
242 | ioat_chan->pending = 0; | |
243 | writew(ioat_chan->dmacount, | |
244 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | |
245 | } | |
246 | ||
247 | static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) | |
248 | { | |
249 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
250 | ||
09177e85 | 251 | if (ioat_chan->pending > 0) { |
711924b1 SN |
252 | spin_lock_bh(&ioat_chan->desc_lock); |
253 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | |
254 | spin_unlock_bh(&ioat_chan->desc_lock); | |
255 | } | |
256 | } | |
7bb67c14 | 257 | |
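
Descriptors queued through `tx_submit` are only started once the channel doorbell is written, either automatically when `pending` crosses `ioat_pending_level` or when a client invokes the `device_issue_pending` hook shown above. A hedged client-side sketch follows (error handling trimmed; it mirrors the self-test at the end of this file and assumes the caller already holds a `struct dma_chan *` served by this driver):

```c
/* Sketch only: queue one copy on an already-mapped src/dst pair and push it
 * to hardware via the hooks this driver registers. */
static dma_cookie_t example_issue_one_copy(struct dma_chan *chan,
					   dma_addr_t dst, dma_addr_t src,
					   size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* appends to the chain, bumps ->pending */

	/* doorbell: CHANCMD APPEND (ver.1) or DMACOUNT write (ver.2/3) */
	chan->device->device_issue_pending(chan);

	return cookie;
}
```
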
09177e85 MS |
258 | |
259 | /** | |
260 | * ioat_dma_chan_reset_part2 - reinit the channel after a reset | |
261 | */ | |
262 | static void ioat_dma_chan_reset_part2(struct work_struct *work) | |
263 | { | |
264 | struct ioat_dma_chan *ioat_chan = | |
265 | container_of(work, struct ioat_dma_chan, work.work); | |
266 | struct ioat_desc_sw *desc; | |
267 | ||
268 | spin_lock_bh(&ioat_chan->cleanup_lock); | |
269 | spin_lock_bh(&ioat_chan->desc_lock); | |
270 | ||
271 | ioat_chan->completion_virt->low = 0; | |
272 | ioat_chan->completion_virt->high = 0; | |
273 | ioat_chan->pending = 0; | |
274 | ||
275 | /* | |
276 | * count the descriptors waiting, and be sure to do it | |
277 | * right for both the CB1 line and the CB2 ring | |
278 | */ | |
279 | ioat_chan->dmacount = 0; | |
280 | if (ioat_chan->used_desc.prev) { | |
281 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | |
282 | do { | |
283 | ioat_chan->dmacount++; | |
284 | desc = to_ioat_desc(desc->node.next); | |
285 | } while (&desc->node != ioat_chan->used_desc.next); | |
286 | } | |
287 | ||
288 | /* | |
289 | * write the new starting descriptor address | |
290 | * this puts channel engine into ARMED state | |
291 | */ | |
292 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | |
293 | switch (ioat_chan->device->version) { | |
294 | case IOAT_VER_1_2: | |
295 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | |
296 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | |
297 | writel(((u64) desc->async_tx.phys) >> 32, | |
298 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | |
299 | ||
300 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | |
301 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | |
302 | break; | |
303 | case IOAT_VER_2_0: | |
304 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | |
305 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | |
306 | writel(((u64) desc->async_tx.phys) >> 32, | |
307 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | |
308 | ||
309 | /* tell the engine to go with what's left to be done */ | |
310 | writew(ioat_chan->dmacount, | |
311 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | |
312 | ||
313 | break; | |
314 | } | |
315 | dev_err(&ioat_chan->device->pdev->dev, | |
316 | "chan%d reset - %d descs waiting, %d total desc\n", | |
317 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | |
318 | ||
319 | spin_unlock_bh(&ioat_chan->desc_lock); | |
320 | spin_unlock_bh(&ioat_chan->cleanup_lock); | |
321 | } | |
322 | ||
323 | /** | |
324 | * ioat_dma_reset_channel - restart a channel | |
325 | * @ioat_chan: IOAT DMA channel handle | |
326 | */ | |
327 | static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) | |
328 | { | |
329 | u32 chansts, chanerr; | |
330 | ||
331 | if (!ioat_chan->used_desc.prev) | |
332 | return; | |
333 | ||
334 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | |
335 | chansts = (ioat_chan->completion_virt->low | |
336 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS); | |
337 | if (chanerr) { | |
338 | dev_err(&ioat_chan->device->pdev->dev, | |
339 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | |
340 | chan_num(ioat_chan), chansts, chanerr); | |
341 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | |
342 | } | |
343 | ||
344 | /* | |
345 | * whack it upside the head with a reset | |
346 | * and wait for things to settle out. | |
347 | * force the pending count to a really big negative | |
348 | * to make sure no one forces an issue_pending | |
349 | * while we're waiting. | |
350 | */ | |
351 | ||
352 | spin_lock_bh(&ioat_chan->desc_lock); | |
353 | ioat_chan->pending = INT_MIN; | |
354 | writeb(IOAT_CHANCMD_RESET, | |
355 | ioat_chan->reg_base | |
356 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | |
357 | spin_unlock_bh(&ioat_chan->desc_lock); | |
358 | ||
359 | /* schedule the 2nd half instead of sleeping a long time */ | |
360 | schedule_delayed_work(&ioat_chan->work, RESET_DELAY); | |
361 | } | |
362 | ||
363 | /** | |
364 | * ioat_dma_chan_watchdog - watch for stuck channels | |
365 | */ | |
366 | static void ioat_dma_chan_watchdog(struct work_struct *work) | |
367 | { | |
368 | struct ioatdma_device *device = | |
369 | container_of(work, struct ioatdma_device, work.work); | |
370 | struct ioat_dma_chan *ioat_chan; | |
371 | int i; | |
372 | ||
373 | union { | |
374 | u64 full; | |
375 | struct { | |
376 | u32 low; | |
377 | u32 high; | |
378 | }; | |
379 | } completion_hw; | |
380 | unsigned long compl_desc_addr_hw; | |
381 | ||
382 | for (i = 0; i < device->common.chancnt; i++) { | |
383 | ioat_chan = ioat_lookup_chan_by_index(device, i); | |
384 | ||
385 | if (ioat_chan->device->version == IOAT_VER_1_2 | |
386 | /* have we started processing anything yet? */ | |
387 | && ioat_chan->last_completion | |
388 | /* have we completed any since last watchdog cycle? */ | |
389 | && (ioat_chan->last_completion == | |
390 | ioat_chan->watchdog_completion) | |
391 | /* has TCP stuck on one cookie since last watchdog? */ | |
392 | && (ioat_chan->watchdog_tcp_cookie == | |
393 | ioat_chan->watchdog_last_tcp_cookie) | |
394 | && (ioat_chan->watchdog_tcp_cookie != | |
395 | ioat_chan->completed_cookie) | |
396 | /* is there something in the chain to be processed? */ | |
397 | /* CB1 chain always has at least the last one processed */ | |
398 | && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) | |
399 | && ioat_chan->pending == 0) { | |
400 | ||
401 | /* | |
402 | * Check the CHANSTS register for the completed | |
403 | * descriptor address. | |
404 | * If it differs from the completion writeback, | |
405 | * is non-zero, | |
406 | * and has changed since the last watchdog pass, | |
407 | * we can assume the channel | |
408 | * is still working correctly | |
409 | * and the problem is in the completion writeback, | |
410 | * so update the completion writeback | |
411 | * with the actual CHANSTS value; | |
412 | * else | |
413 | * try resetting the channel. | |
414 | */ | |
415 | ||
416 | completion_hw.low = readl(ioat_chan->reg_base + | |
417 | IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); | |
418 | completion_hw.high = readl(ioat_chan->reg_base + | |
419 | IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); | |
420 | #if (BITS_PER_LONG == 64) | |
421 | compl_desc_addr_hw = | |
422 | completion_hw.full | |
423 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | |
424 | #else | |
425 | compl_desc_addr_hw = | |
426 | completion_hw.low & IOAT_LOW_COMPLETION_MASK; | |
427 | #endif | |
428 | ||
429 | if ((compl_desc_addr_hw != 0) | |
430 | && (compl_desc_addr_hw != ioat_chan->watchdog_completion) | |
431 | && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { | |
432 | ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; | |
433 | ioat_chan->completion_virt->low = completion_hw.low; | |
434 | ioat_chan->completion_virt->high = completion_hw.high; | |
435 | } else { | |
436 | ioat_dma_reset_channel(ioat_chan); | |
437 | ioat_chan->watchdog_completion = 0; | |
438 | ioat_chan->last_compl_desc_addr_hw = 0; | |
439 | } | |
440 | ||
441 | /* | |
442 | * For version 2.0: if there are descriptors yet to be processed | |
443 | * and the last completed one hasn't changed since the last watchdog, | |
444 | * then if they haven't hit the pending level, | |
445 | * issue the pending descriptors to push them through; | |
446 | * else | |
447 | * try resetting the channel. | |
448 | */ | |
449 | } else if (ioat_chan->device->version == IOAT_VER_2_0 | |
450 | && ioat_chan->used_desc.prev | |
451 | && ioat_chan->last_completion | |
452 | && ioat_chan->last_completion == ioat_chan->watchdog_completion) { | |
453 | ||
454 | if (ioat_chan->pending < ioat_pending_level) | |
455 | ioat2_dma_memcpy_issue_pending(&ioat_chan->common); | |
456 | else { | |
457 | ioat_dma_reset_channel(ioat_chan); | |
458 | ioat_chan->watchdog_completion = 0; | |
459 | } | |
460 | } else { | |
461 | ioat_chan->last_compl_desc_addr_hw = 0; | |
462 | ioat_chan->watchdog_completion | |
463 | = ioat_chan->last_completion; | |
464 | } | |
465 | ||
466 | ioat_chan->watchdog_last_tcp_cookie = | |
467 | ioat_chan->watchdog_tcp_cookie; | |
468 | } | |
469 | ||
470 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | |
471 | } | |
472 | ||
7bb67c14 | 473 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) |
7405f74b DW |
474 | { |
475 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | |
7f2b291f SN |
476 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); |
477 | struct ioat_desc_sw *prev, *new; | |
478 | struct ioat_dma_descriptor *hw; | |
7405f74b | 479 | dma_cookie_t cookie; |
7f2b291f SN |
480 | LIST_HEAD(new_chain); |
481 | u32 copy; | |
482 | size_t len; | |
483 | dma_addr_t src, dst; | |
636bdeaa | 484 | unsigned long orig_flags; |
7f2b291f SN |
485 | unsigned int desc_count = 0; |
486 | ||
487 | /* src and dest and len are stored in the initial descriptor */ | |
488 | len = first->len; | |
489 | src = first->src; | |
490 | dst = first->dst; | |
636bdeaa | 491 | orig_flags = first->async_tx.flags; |
7f2b291f | 492 | new = first; |
7405f74b | 493 | |
7405f74b | 494 | spin_lock_bh(&ioat_chan->desc_lock); |
7f2b291f SN |
495 | prev = to_ioat_desc(ioat_chan->used_desc.prev); |
496 | prefetch(prev->hw); | |
497 | do { | |
711924b1 | 498 | copy = min_t(size_t, len, ioat_chan->xfercap); |
7f2b291f | 499 | |
636bdeaa | 500 | async_tx_ack(&new->async_tx); |
7f2b291f SN |
501 | |
502 | hw = new->hw; | |
503 | hw->size = copy; | |
504 | hw->ctl = 0; | |
505 | hw->src_addr = src; | |
506 | hw->dst_addr = dst; | |
507 | hw->next = 0; | |
508 | ||
509 | /* chain together the physical address list for the HW */ | |
510 | wmb(); | |
511 | prev->hw->next = (u64) new->async_tx.phys; | |
512 | ||
513 | len -= copy; | |
514 | dst += copy; | |
515 | src += copy; | |
516 | ||
517 | list_add_tail(&new->node, &new_chain); | |
518 | desc_count++; | |
519 | prev = new; | |
7bb67c14 | 520 | } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); |
7f2b291f | 521 | |
7f1b358a MS |
522 | if (!new) { |
523 | dev_err(&ioat_chan->device->pdev->dev, | |
524 | "tx submit failed\n"); | |
525 | spin_unlock_bh(&ioat_chan->desc_lock); | |
526 | return -ENOMEM; | |
527 | } | |
528 | ||
7f2b291f | 529 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; |
12ccea24 | 530 | if (first->async_tx.callback) { |
95218430 SN |
531 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
532 | if (first != new) { | |
533 | /* move callback into the last desc */ | |
534 | new->async_tx.callback = first->async_tx.callback; | |
535 | new->async_tx.callback_param | |
536 | = first->async_tx.callback_param; | |
537 | first->async_tx.callback = NULL; | |
538 | first->async_tx.callback_param = NULL; | |
539 | } | |
540 | } | |
541 | ||
7f2b291f | 542 | new->tx_cnt = desc_count; |
636bdeaa | 543 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ |
7f2b291f SN |
544 | |
545 | /* store the original values for use in later cleanup */ | |
546 | if (new != first) { | |
547 | new->src = first->src; | |
548 | new->dst = first->dst; | |
549 | new->len = first->len; | |
550 | } | |
551 | ||
7405f74b DW |
552 | /* cookie incr and addition to used_list must be atomic */ |
553 | cookie = ioat_chan->common.cookie; | |
554 | cookie++; | |
555 | if (cookie < 0) | |
556 | cookie = 1; | |
7f2b291f | 557 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; |
7405f74b DW |
558 | |
559 | /* write address into NextDescriptor field of last desc in chain */ | |
560 | to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = | |
7f2b291f | 561 | first->async_tx.phys; |
7d283aee | 562 | list_splice_tail(&new_chain, &ioat_chan->used_desc); |
7405f74b | 563 | |
7bb67c14 | 564 | ioat_chan->dmacount += desc_count; |
7f2b291f | 565 | ioat_chan->pending += desc_count; |
7bb67c14 SN |
566 | if (ioat_chan->pending >= ioat_pending_level) |
567 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | |
7405f74b DW |
568 | spin_unlock_bh(&ioat_chan->desc_lock); |
569 | ||
7bb67c14 SN |
570 | return cookie; |
571 | } | |
572 | ||
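
The cookie handling in `ioat1_tx_submit` above (and in `ioat2_tx_submit` below) relies on `dma_cookie_t` being a signed integer: it is monotonically incremented and wrapped back to 1 on overflow, because zero and negative values are reserved for "no cookie" and error returns. A small sketch of that invariant, with an illustrative helper name that is not part of the driver:

```c
/* Illustrative only: the cookie progression used by both tx_submit paths. */
static inline dma_cookie_t example_next_cookie(dma_cookie_t cookie)
{
	if (++cookie < 0)	/* wrapped past INT_MAX */
		cookie = 1;	/* 0 and negative values are reserved */
	return cookie;
}
```
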
573 | static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | |
574 | { | |
575 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | |
576 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); | |
577 | struct ioat_desc_sw *new; | |
578 | struct ioat_dma_descriptor *hw; | |
579 | dma_cookie_t cookie; | |
580 | u32 copy; | |
581 | size_t len; | |
582 | dma_addr_t src, dst; | |
636bdeaa | 583 | unsigned long orig_flags; |
7bb67c14 SN |
584 | unsigned int desc_count = 0; |
585 | ||
586 | /* src and dest and len are stored in the initial descriptor */ | |
587 | len = first->len; | |
588 | src = first->src; | |
589 | dst = first->dst; | |
636bdeaa | 590 | orig_flags = first->async_tx.flags; |
7bb67c14 SN |
591 | new = first; |
592 | ||
711924b1 SN |
593 | /* |
594 | * ioat_chan->desc_lock is still in force in version 2 path | |
595 | * it gets unlocked at end of this function | |
596 | */ | |
7bb67c14 | 597 | do { |
711924b1 | 598 | copy = min_t(size_t, len, ioat_chan->xfercap); |
7bb67c14 | 599 | |
636bdeaa | 600 | async_tx_ack(&new->async_tx); |
7bb67c14 SN |
601 | |
602 | hw = new->hw; | |
603 | hw->size = copy; | |
604 | hw->ctl = 0; | |
605 | hw->src_addr = src; | |
606 | hw->dst_addr = dst; | |
607 | ||
608 | len -= copy; | |
609 | dst += copy; | |
610 | src += copy; | |
611 | desc_count++; | |
612 | } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); | |
613 | ||
7f1b358a MS |
614 | if (!new) { |
615 | dev_err(&ioat_chan->device->pdev->dev, | |
616 | "tx submit failed\n"); | |
617 | spin_unlock_bh(&ioat_chan->desc_lock); | |
618 | return -ENOMEM; | |
619 | } | |
620 | ||
621 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | |
12ccea24 | 622 | if (first->async_tx.callback) { |
7bb67c14 SN |
623 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
624 | if (first != new) { | |
625 | /* move callback into the last desc */ | |
626 | new->async_tx.callback = first->async_tx.callback; | |
627 | new->async_tx.callback_param | |
628 | = first->async_tx.callback_param; | |
629 | first->async_tx.callback = NULL; | |
630 | first->async_tx.callback_param = NULL; | |
631 | } | |
632 | } | |
633 | ||
634 | new->tx_cnt = desc_count; | |
636bdeaa | 635 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ |
7bb67c14 SN |
636 | |
637 | /* store the original values for use in later cleanup */ | |
638 | if (new != first) { | |
639 | new->src = first->src; | |
640 | new->dst = first->dst; | |
641 | new->len = first->len; | |
642 | } | |
643 | ||
644 | /* cookie incr and addition to used_list must be atomic */ | |
645 | cookie = ioat_chan->common.cookie; | |
646 | cookie++; | |
647 | if (cookie < 0) | |
648 | cookie = 1; | |
649 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; | |
650 | ||
651 | ioat_chan->dmacount += desc_count; | |
652 | ioat_chan->pending += desc_count; | |
653 | if (ioat_chan->pending >= ioat_pending_level) | |
654 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | |
655 | spin_unlock_bh(&ioat_chan->desc_lock); | |
1fda5f4e | 656 | |
7405f74b DW |
657 | return cookie; |
658 | } | |
659 | ||
7bb67c14 SN |
660 | /** |
661 | * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair | |
662 | * @ioat_chan: the channel supplying the memory pool for the descriptors | |
663 | * @flags: allocation flags | |
664 | */ | |
0bbd5f4e | 665 | static struct ioat_desc_sw *ioat_dma_alloc_descriptor( |
43d6e369 SN |
666 | struct ioat_dma_chan *ioat_chan, |
667 | gfp_t flags) | |
0bbd5f4e CL |
668 | { |
669 | struct ioat_dma_descriptor *desc; | |
670 | struct ioat_desc_sw *desc_sw; | |
8ab89567 | 671 | struct ioatdma_device *ioatdma_device; |
0bbd5f4e CL |
672 | dma_addr_t phys; |
673 | ||
8ab89567 SN |
674 | ioatdma_device = to_ioatdma_device(ioat_chan->common.device); |
675 | desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); | |
0bbd5f4e CL |
676 | if (unlikely(!desc)) |
677 | return NULL; | |
678 | ||
679 | desc_sw = kzalloc(sizeof(*desc_sw), flags); | |
680 | if (unlikely(!desc_sw)) { | |
8ab89567 | 681 | pci_pool_free(ioatdma_device->dma_pool, desc, phys); |
0bbd5f4e CL |
682 | return NULL; |
683 | } | |
684 | ||
685 | memset(desc, 0, sizeof(*desc)); | |
7405f74b | 686 | dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); |
7bb67c14 SN |
687 | switch (ioat_chan->device->version) { |
688 | case IOAT_VER_1_2: | |
689 | desc_sw->async_tx.tx_submit = ioat1_tx_submit; | |
690 | break; | |
691 | case IOAT_VER_2_0: | |
7f1b358a | 692 | case IOAT_VER_3_0: |
7bb67c14 SN |
693 | desc_sw->async_tx.tx_submit = ioat2_tx_submit; |
694 | break; | |
695 | } | |
7405f74b | 696 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); |
7bb67c14 | 697 | |
0bbd5f4e | 698 | desc_sw->hw = desc; |
7405f74b | 699 | desc_sw->async_tx.phys = phys; |
0bbd5f4e CL |
700 | |
701 | return desc_sw; | |
702 | } | |
703 | ||
7bb67c14 SN |
704 | static int ioat_initial_desc_count = 256; |
705 | module_param(ioat_initial_desc_count, int, 0644); | |
706 | MODULE_PARM_DESC(ioat_initial_desc_count, | |
707 | "initial descriptors per channel (default: 256)"); | |
708 | ||
709 | /** | |
710 | * ioat2_dma_massage_chan_desc - link the descriptors into a circle | |
711 | * @ioat_chan: the channel to be massaged | |
712 | */ | |
713 | static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | |
714 | { | |
715 | struct ioat_desc_sw *desc, *_desc; | |
716 | ||
717 | /* setup used_desc */ | |
718 | ioat_chan->used_desc.next = ioat_chan->free_desc.next; | |
719 | ioat_chan->used_desc.prev = NULL; | |
720 | ||
721 | /* pull free_desc out of the circle so that every node is a hw | |
722 | * descriptor, but leave it pointing to the list | |
723 | */ | |
724 | ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; | |
725 | ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; | |
726 | ||
727 | /* circle link the hw descriptors */ | |
728 | desc = to_ioat_desc(ioat_chan->free_desc.next); | |
729 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | |
730 | list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { | |
731 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | |
732 | } | |
733 | } | |
734 | ||
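
After `ioat2_dma_massage_chan_desc` runs, every software descriptor's `hw->next` holds the bus address of its successor and the last entry wraps back to the first, so the hardware can chase the chain as a ring. A hedged debugging-style sketch of checking that invariant is below; the helper is illustrative only, not part of the driver, and a real caller would hold `desc_lock`:

```c
/* Illustrative only: walk the ver.2 ring and confirm each hw descriptor
 * points at the bus address of its successor, as linked up above.  Mirrors
 * the iteration style of the massage routine (free_desc.next is a node in
 * the ring, not a list head). */
static bool example_ring_is_linked(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc = to_ioat_desc(ioat_chan->free_desc.next);

	do {
		struct ioat_desc_sw *next = to_ioat_desc(desc->node.next);

		if (desc->hw->next != next->async_tx.phys)
			return false;
		desc = next;
	} while (&desc->node != ioat_chan->free_desc.next);

	return true;
}
```
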
735 | /** | |
736 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | |
737 | * @chan: the channel to be filled out | |
738 | */ | |
aa1e6f1a | 739 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) |
0bbd5f4e CL |
740 | { |
741 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
711924b1 | 742 | struct ioat_desc_sw *desc; |
0bbd5f4e CL |
743 | u16 chanctrl; |
744 | u32 chanerr; | |
745 | int i; | |
746 | LIST_HEAD(tmp_list); | |
747 | ||
e4223976 SN |
748 | /* have we already been set up? */ |
749 | if (!list_empty(&ioat_chan->free_desc)) | |
7bb67c14 | 750 | return ioat_chan->desccount; |
0bbd5f4e | 751 | |
43d6e369 | 752 | /* Setup register to interrupt and write completion status on error */ |
e4223976 | 753 | chanctrl = IOAT_CHANCTRL_ERR_INT_EN | |
0bbd5f4e CL |
754 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | |
755 | IOAT_CHANCTRL_ERR_COMPLETION_EN; | |
43d6e369 | 756 | writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); |
0bbd5f4e | 757 | |
e3828811 | 758 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); |
0bbd5f4e | 759 | if (chanerr) { |
43d6e369 | 760 | dev_err(&ioat_chan->device->pdev->dev, |
5149fd01 | 761 | "CHANERR = %x, clearing\n", chanerr); |
e3828811 | 762 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); |
0bbd5f4e CL |
763 | } |
764 | ||
765 | /* Allocate descriptors */ | |
7bb67c14 | 766 | for (i = 0; i < ioat_initial_desc_count; i++) { |
0bbd5f4e CL |
767 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); |
768 | if (!desc) { | |
43d6e369 | 769 | dev_err(&ioat_chan->device->pdev->dev, |
5149fd01 | 770 | "Only %d initial descriptors\n", i); |
0bbd5f4e CL |
771 | break; |
772 | } | |
773 | list_add_tail(&desc->node, &tmp_list); | |
774 | } | |
775 | spin_lock_bh(&ioat_chan->desc_lock); | |
7bb67c14 | 776 | ioat_chan->desccount = i; |
0bbd5f4e | 777 | list_splice(&tmp_list, &ioat_chan->free_desc); |
7bb67c14 SN |
778 | if (ioat_chan->device->version != IOAT_VER_1_2) |
779 | ioat2_dma_massage_chan_desc(ioat_chan); | |
0bbd5f4e CL |
780 | spin_unlock_bh(&ioat_chan->desc_lock); |
781 | ||
782 | /* allocate a completion writeback area */ | |
783 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | |
784 | ioat_chan->completion_virt = | |
785 | pci_pool_alloc(ioat_chan->device->completion_pool, | |
43d6e369 SN |
786 | GFP_KERNEL, |
787 | &ioat_chan->completion_addr); | |
0bbd5f4e CL |
788 | memset(ioat_chan->completion_virt, 0, |
789 | sizeof(*ioat_chan->completion_virt)); | |
e3828811 CL |
790 | writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, |
791 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | |
792 | writel(((u64) ioat_chan->completion_addr) >> 32, | |
793 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | |
0bbd5f4e | 794 | |
3e037454 | 795 | tasklet_enable(&ioat_chan->cleanup_task); |
7bb67c14 SN |
796 | ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ |
797 | return ioat_chan->desccount; | |
0bbd5f4e CL |
798 | } |
799 | ||
7bb67c14 SN |
800 | /** |
801 | * ioat_dma_free_chan_resources - release all the descriptors | |
802 | * @chan: the channel to be cleaned | |
803 | */ | |
0bbd5f4e CL |
804 | static void ioat_dma_free_chan_resources(struct dma_chan *chan) |
805 | { | |
806 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
8ab89567 | 807 | struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); |
0bbd5f4e | 808 | struct ioat_desc_sw *desc, *_desc; |
0bbd5f4e CL |
809 | int in_use_descs = 0; |
810 | ||
c3d4f44f MS |
811 | /* Before freeing channel resources first check |
812 | * if they have been previously allocated for this channel. | |
813 | */ | |
814 | if (ioat_chan->desccount == 0) | |
815 | return; | |
816 | ||
3e037454 | 817 | tasklet_disable(&ioat_chan->cleanup_task); |
0bbd5f4e CL |
818 | ioat_dma_memcpy_cleanup(ioat_chan); |
819 | ||
3e037454 SN |
820 | /* Delay 100ms after reset to allow internal DMA logic to quiesce |
821 | * before removing DMA descriptor resources. | |
822 | */ | |
7bb67c14 SN |
823 | writeb(IOAT_CHANCMD_RESET, |
824 | ioat_chan->reg_base | |
825 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | |
3e037454 | 826 | mdelay(100); |
0bbd5f4e CL |
827 | |
828 | spin_lock_bh(&ioat_chan->desc_lock); | |
7bb67c14 SN |
829 | switch (ioat_chan->device->version) { |
830 | case IOAT_VER_1_2: | |
831 | list_for_each_entry_safe(desc, _desc, | |
832 | &ioat_chan->used_desc, node) { | |
833 | in_use_descs++; | |
834 | list_del(&desc->node); | |
835 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | |
836 | desc->async_tx.phys); | |
837 | kfree(desc); | |
838 | } | |
839 | list_for_each_entry_safe(desc, _desc, | |
840 | &ioat_chan->free_desc, node) { | |
841 | list_del(&desc->node); | |
842 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | |
843 | desc->async_tx.phys); | |
844 | kfree(desc); | |
845 | } | |
846 | break; | |
847 | case IOAT_VER_2_0: | |
7f1b358a | 848 | case IOAT_VER_3_0: |
7bb67c14 SN |
849 | list_for_each_entry_safe(desc, _desc, |
850 | ioat_chan->free_desc.next, node) { | |
851 | list_del(&desc->node); | |
852 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | |
853 | desc->async_tx.phys); | |
854 | kfree(desc); | |
855 | } | |
856 | desc = to_ioat_desc(ioat_chan->free_desc.next); | |
8ab89567 | 857 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, |
7405f74b | 858 | desc->async_tx.phys); |
0bbd5f4e | 859 | kfree(desc); |
7bb67c14 SN |
860 | INIT_LIST_HEAD(&ioat_chan->free_desc); |
861 | INIT_LIST_HEAD(&ioat_chan->used_desc); | |
862 | break; | |
0bbd5f4e CL |
863 | } |
864 | spin_unlock_bh(&ioat_chan->desc_lock); | |
865 | ||
8ab89567 | 866 | pci_pool_free(ioatdma_device->completion_pool, |
43d6e369 SN |
867 | ioat_chan->completion_virt, |
868 | ioat_chan->completion_addr); | |
0bbd5f4e CL |
869 | |
870 | /* one is ok since we left it on there on purpose */ | |
871 | if (in_use_descs > 1) | |
43d6e369 | 872 | dev_err(&ioat_chan->device->pdev->dev, |
5149fd01 | 873 | "Freeing %d in use descriptors!\n", |
0bbd5f4e CL |
874 | in_use_descs - 1); |
875 | ||
876 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; | |
3e037454 | 877 | ioat_chan->pending = 0; |
7bb67c14 | 878 | ioat_chan->dmacount = 0; |
c3d4f44f | 879 | ioat_chan->desccount = 0; |
09177e85 MS |
880 | ioat_chan->watchdog_completion = 0; |
881 | ioat_chan->last_compl_desc_addr_hw = 0; | |
882 | ioat_chan->watchdog_tcp_cookie = | |
883 | ioat_chan->watchdog_last_tcp_cookie = 0; | |
3e037454 | 884 | } |
7f2b291f | 885 | |
3e037454 SN |
886 | /** |
887 | * ioat_dma_get_next_descriptor - return the next available descriptor | |
888 | * @ioat_chan: IOAT DMA channel handle | |
889 | * | |
890 | * Gets the next descriptor from the chain, and must be called with the | |
891 | * channel's desc_lock held. Allocates more descriptors if the channel | |
892 | * has run out. | |
893 | */ | |
7f2b291f | 894 | static struct ioat_desc_sw * |
7bb67c14 | 895 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) |
3e037454 | 896 | { |
711924b1 | 897 | struct ioat_desc_sw *new; |
3e037454 SN |
898 | |
899 | if (!list_empty(&ioat_chan->free_desc)) { | |
900 | new = to_ioat_desc(ioat_chan->free_desc.next); | |
901 | list_del(&new->node); | |
902 | } else { | |
903 | /* try to get another desc */ | |
904 | new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | |
711924b1 SN |
905 | if (!new) { |
906 | dev_err(&ioat_chan->device->pdev->dev, | |
907 | "alloc failed\n"); | |
908 | return NULL; | |
909 | } | |
3e037454 SN |
910 | } |
911 | ||
912 | prefetch(new->hw); | |
913 | return new; | |
0bbd5f4e CL |
914 | } |
915 | ||
7bb67c14 SN |
916 | static struct ioat_desc_sw * |
917 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | |
918 | { | |
711924b1 | 919 | struct ioat_desc_sw *new; |
7bb67c14 SN |
920 | |
921 | /* | |
922 | * used.prev points to where to start processing | |
923 | * used.next points to next free descriptor | |
924 | * if used.prev == NULL, there are none waiting to be processed | |
925 | * if used.next == used.prev.prev, there is only one free descriptor, | |
926 | * and we need to use it as a noop descriptor before | |
927 | * linking in a new set of descriptors, since the device | |
928 | * has probably already read the pointer to it | |
929 | */ | |
930 | if (ioat_chan->used_desc.prev && | |
931 | ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { | |
932 | ||
711924b1 SN |
933 | struct ioat_desc_sw *desc; |
934 | struct ioat_desc_sw *noop_desc; | |
7bb67c14 SN |
935 | int i; |
936 | ||
937 | /* set up the noop descriptor */ | |
938 | noop_desc = to_ioat_desc(ioat_chan->used_desc.next); | |
7f1b358a MS |
939 | /* set size to non-zero value (channel returns error when size is 0) */ |
940 | noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; | |
7bb67c14 SN |
941 | noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; |
942 | noop_desc->hw->src_addr = 0; | |
943 | noop_desc->hw->dst_addr = 0; | |
944 | ||
945 | ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; | |
946 | ioat_chan->pending++; | |
947 | ioat_chan->dmacount++; | |
948 | ||
711924b1 | 949 | /* try to get a few more descriptors */ |
7bb67c14 SN |
950 | for (i = 16; i; i--) { |
951 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | |
711924b1 SN |
952 | if (!desc) { |
953 | dev_err(&ioat_chan->device->pdev->dev, | |
954 | "alloc failed\n"); | |
955 | break; | |
956 | } | |
7bb67c14 SN |
957 | list_add_tail(&desc->node, ioat_chan->used_desc.next); |
958 | ||
959 | desc->hw->next | |
960 | = to_ioat_desc(desc->node.next)->async_tx.phys; | |
961 | to_ioat_desc(desc->node.prev)->hw->next | |
962 | = desc->async_tx.phys; | |
963 | ioat_chan->desccount++; | |
964 | } | |
965 | ||
966 | ioat_chan->used_desc.next = noop_desc->node.next; | |
967 | } | |
968 | new = to_ioat_desc(ioat_chan->used_desc.next); | |
969 | prefetch(new); | |
970 | ioat_chan->used_desc.next = new->node.next; | |
971 | ||
972 | if (ioat_chan->used_desc.prev == NULL) | |
973 | ioat_chan->used_desc.prev = &new->node; | |
974 | ||
975 | prefetch(new->hw); | |
976 | return new; | |
977 | } | |
978 | ||
979 | static struct ioat_desc_sw *ioat_dma_get_next_descriptor( | |
980 | struct ioat_dma_chan *ioat_chan) | |
981 | { | |
982 | if (!ioat_chan) | |
983 | return NULL; | |
984 | ||
985 | switch (ioat_chan->device->version) { | |
986 | case IOAT_VER_1_2: | |
987 | return ioat1_dma_get_next_descriptor(ioat_chan); | |
7bb67c14 | 988 | case IOAT_VER_2_0: |
7f1b358a | 989 | case IOAT_VER_3_0: |
7bb67c14 | 990 | return ioat2_dma_get_next_descriptor(ioat_chan); |
7bb67c14 SN |
991 | } |
992 | return NULL; | |
993 | } | |
994 | ||
995 | static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |
43d6e369 | 996 | struct dma_chan *chan, |
0036731c DW |
997 | dma_addr_t dma_dest, |
998 | dma_addr_t dma_src, | |
43d6e369 | 999 | size_t len, |
d4c56f97 | 1000 | unsigned long flags) |
0bbd5f4e | 1001 | { |
7405f74b | 1002 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
7f2b291f | 1003 | struct ioat_desc_sw *new; |
0bbd5f4e CL |
1004 | |
1005 | spin_lock_bh(&ioat_chan->desc_lock); | |
7f2b291f | 1006 | new = ioat_dma_get_next_descriptor(ioat_chan); |
0bbd5f4e CL |
1007 | spin_unlock_bh(&ioat_chan->desc_lock); |
1008 | ||
711924b1 SN |
1009 | if (new) { |
1010 | new->len = len; | |
0036731c DW |
1011 | new->dst = dma_dest; |
1012 | new->src = dma_src; | |
636bdeaa | 1013 | new->async_tx.flags = flags; |
711924b1 | 1014 | return &new->async_tx; |
09177e85 MS |
1015 | } else { |
1016 | dev_err(&ioat_chan->device->pdev->dev, | |
1017 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | |
1018 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | |
711924b1 | 1019 | return NULL; |
09177e85 | 1020 | } |
0bbd5f4e CL |
1021 | } |
1022 | ||
7bb67c14 SN |
1023 | static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( |
1024 | struct dma_chan *chan, | |
0036731c DW |
1025 | dma_addr_t dma_dest, |
1026 | dma_addr_t dma_src, | |
7bb67c14 | 1027 | size_t len, |
d4c56f97 | 1028 | unsigned long flags) |
7bb67c14 SN |
1029 | { |
1030 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
1031 | struct ioat_desc_sw *new; | |
1032 | ||
1033 | spin_lock_bh(&ioat_chan->desc_lock); | |
1034 | new = ioat2_dma_get_next_descriptor(ioat_chan); | |
7bb67c14 | 1035 | |
711924b1 SN |
1036 | /* |
1037 | * leave ioat_chan->desc_lock set in ioat 2 path | |
1038 | * it will get unlocked at end of tx_submit | |
1039 | */ | |
7bb67c14 | 1040 | |
711924b1 SN |
1041 | if (new) { |
1042 | new->len = len; | |
0036731c DW |
1043 | new->dst = dma_dest; |
1044 | new->src = dma_src; | |
636bdeaa | 1045 | new->async_tx.flags = flags; |
711924b1 | 1046 | return &new->async_tx; |
09177e85 MS |
1047 | } else { |
1048 | spin_unlock_bh(&ioat_chan->desc_lock); | |
1049 | dev_err(&ioat_chan->device->pdev->dev, | |
1050 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | |
1051 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | |
711924b1 | 1052 | return NULL; |
09177e85 | 1053 | } |
0bbd5f4e CL |
1054 | } |
1055 | ||
3e037454 SN |
1056 | static void ioat_dma_cleanup_tasklet(unsigned long data) |
1057 | { | |
1058 | struct ioat_dma_chan *chan = (void *)data; | |
1059 | ioat_dma_memcpy_cleanup(chan); | |
1060 | writew(IOAT_CHANCTRL_INT_DISABLE, | |
1061 | chan->reg_base + IOAT_CHANCTRL_OFFSET); | |
1062 | } | |
1063 | ||
e1d181ef DW |
1064 | static void |
1065 | ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) | |
1066 | { | |
1067 | /* | |
1068 | * yes we are unmapping both _page and _single | |
1069 | * alloc'd regions with unmap_page. Is this | |
1070 | * *really* that bad? | |
1071 | */ | |
1072 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) | |
1073 | pci_unmap_page(ioat_chan->device->pdev, | |
1074 | pci_unmap_addr(desc, dst), | |
1075 | pci_unmap_len(desc, len), | |
1076 | PCI_DMA_FROMDEVICE); | |
1077 | ||
1078 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) | |
1079 | pci_unmap_page(ioat_chan->device->pdev, | |
1080 | pci_unmap_addr(desc, src), | |
1081 | pci_unmap_len(desc, len), | |
1082 | PCI_DMA_TODEVICE); | |
1083 | } | |
1084 | ||
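
`ioat_dma_unmap` above honours the dmaengine completion flags, so a client that wants to keep its own DMA mappings alive across the copy can opt out of the automatic unmap. A hedged sketch of how such a client would request that when preparing a descriptor; the flag names are exactly the ones tested above, while the helper itself is illustrative:

```c
/* Sketch only: prepare a copy whose mappings the caller will unmap itself,
 * so the cleanup path above skips both pci_unmap_page() calls. */
static struct dma_async_tx_descriptor *
example_prep_no_unmap(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		      size_t len)
{
	unsigned long flags = DMA_COMPL_SKIP_DEST_UNMAP |
			      DMA_COMPL_SKIP_SRC_UNMAP;

	return chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
}
```
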
7bb67c14 SN |
1085 | /** |
1086 | * ioat_dma_memcpy_cleanup - clean up finished descriptors | |
1087 | * @chan: ioat channel to be cleaned up | |
1088 | */ | |
43d6e369 | 1089 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) |
0bbd5f4e CL |
1090 | { |
1091 | unsigned long phys_complete; | |
1092 | struct ioat_desc_sw *desc, *_desc; | |
1093 | dma_cookie_t cookie = 0; | |
7bb67c14 SN |
1094 | unsigned long desc_phys; |
1095 | struct ioat_desc_sw *latest_desc; | |
0bbd5f4e | 1096 | |
43d6e369 | 1097 | prefetch(ioat_chan->completion_virt); |
0bbd5f4e | 1098 | |
7f2b291f | 1099 | if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) |
0bbd5f4e CL |
1100 | return; |
1101 | ||
1102 | /* The completion writeback can happen at any time, | |
1103 | so reads by the driver need to be atomic operations. | |
1104 | The descriptor physical addresses are limited to 32 bits | |
1105 | when the CPU can only do a 32-bit mov. */ | |
1106 | ||
1107 | #if (BITS_PER_LONG == 64) | |
1108 | phys_complete = | |
7f2b291f SN |
1109 | ioat_chan->completion_virt->full |
1110 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | |
0bbd5f4e | 1111 | #else |
7f2b291f SN |
1112 | phys_complete = |
1113 | ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; | |
0bbd5f4e CL |
1114 | #endif |
1115 | ||
7f2b291f SN |
1116 | if ((ioat_chan->completion_virt->full |
1117 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == | |
43d6e369 SN |
1118 | IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { |
1119 | dev_err(&ioat_chan->device->pdev->dev, | |
5149fd01 | 1120 | "Channel halted, chanerr = %x\n", |
43d6e369 | 1121 | readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); |
0bbd5f4e CL |
1122 | |
1123 | /* TODO do something to salvage the situation */ | |
1124 | } | |
1125 | ||
43d6e369 | 1126 | if (phys_complete == ioat_chan->last_completion) { |
7f2b291f | 1127 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
09177e85 MS |
1128 | /* |
1129 | * perhaps we're stuck so hard that the watchdog can't go off? | |
1130 | * try to catch it after 2 seconds | |
1131 | */ | |
7f1b358a MS |
1132 | if (ioat_chan->device->version != IOAT_VER_3_0) { |
1133 | if (time_after(jiffies, | |
1134 | ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { | |
1135 | ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); | |
1136 | ioat_chan->last_completion_time = jiffies; | |
1137 | } | |
09177e85 | 1138 | } |
0bbd5f4e CL |
1139 | return; |
1140 | } | |
09177e85 | 1141 | ioat_chan->last_completion_time = jiffies; |
0bbd5f4e | 1142 | |
3e037454 | 1143 | cookie = 0; |
09177e85 MS |
1144 | if (!spin_trylock_bh(&ioat_chan->desc_lock)) { |
1145 | spin_unlock_bh(&ioat_chan->cleanup_lock); | |
1146 | return; | |
1147 | } | |
1148 | ||
7bb67c14 SN |
1149 | switch (ioat_chan->device->version) { |
1150 | case IOAT_VER_1_2: | |
1151 | list_for_each_entry_safe(desc, _desc, | |
1152 | &ioat_chan->used_desc, node) { | |
0bbd5f4e | 1153 | |
43d6e369 | 1154 | /* |
7bb67c14 SN |
1155 | * Incoming DMA requests may use multiple descriptors, |
1156 | * due to exceeding xfercap, perhaps. If so, only the | |
1157 | * last one will have a cookie, and require unmapping. | |
43d6e369 | 1158 | */ |
7bb67c14 SN |
1159 | if (desc->async_tx.cookie) { |
1160 | cookie = desc->async_tx.cookie; | |
e1d181ef | 1161 | ioat_dma_unmap(ioat_chan, desc); |
7bb67c14 SN |
1162 | if (desc->async_tx.callback) { |
1163 | desc->async_tx.callback(desc->async_tx.callback_param); | |
1164 | desc->async_tx.callback = NULL; | |
1165 | } | |
95218430 | 1166 | } |
0bbd5f4e | 1167 | |
7bb67c14 SN |
1168 | if (desc->async_tx.phys != phys_complete) { |
1169 | /* | |
1170 | * a completed entry, but not the last, so clean | |
1171 | * up if the client is done with the descriptor | |
1172 | */ | |
636bdeaa | 1173 | if (async_tx_test_ack(&desc->async_tx)) { |
aa2d0b8b ES |
1174 | list_move_tail(&desc->node, |
1175 | &ioat_chan->free_desc); | |
7bb67c14 SN |
1176 | } else |
1177 | desc->async_tx.cookie = 0; | |
1178 | } else { | |
1179 | /* | |
1180 | * last used desc. Do not remove, so we can | |
1181 | * append from it, but don't look at it next | |
1182 | * time, either | |
1183 | */ | |
7405f74b | 1184 | desc->async_tx.cookie = 0; |
0bbd5f4e | 1185 | |
7bb67c14 SN |
1186 | /* TODO check status bits? */ |
1187 | break; | |
1188 | } | |
1189 | } | |
1190 | break; | |
1191 | case IOAT_VER_2_0: | |
7f1b358a | 1192 | case IOAT_VER_3_0: |
7bb67c14 SN |
1193 | /* has some other thread already cleaned up? */ | |
1194 | if (ioat_chan->used_desc.prev == NULL) | |
0bbd5f4e | 1195 | break; |
7bb67c14 SN |
1196 | |
1197 | /* work backwards to find latest finished desc */ | |
1198 | desc = to_ioat_desc(ioat_chan->used_desc.next); | |
1199 | latest_desc = NULL; | |
1200 | do { | |
1201 | desc = to_ioat_desc(desc->node.prev); | |
1202 | desc_phys = (unsigned long)desc->async_tx.phys | |
1203 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | |
1204 | if (desc_phys == phys_complete) { | |
1205 | latest_desc = desc; | |
1206 | break; | |
1207 | } | |
1208 | } while (&desc->node != ioat_chan->used_desc.prev); | |
1209 | ||
1210 | if (latest_desc != NULL) { | |
1211 | ||
1212 | /* work forwards to clear finished descriptors */ | |
1213 | for (desc = to_ioat_desc(ioat_chan->used_desc.prev); | |
1214 | &desc->node != latest_desc->node.next && | |
1215 | &desc->node != ioat_chan->used_desc.next; | |
1216 | desc = to_ioat_desc(desc->node.next)) { | |
1217 | if (desc->async_tx.cookie) { | |
1218 | cookie = desc->async_tx.cookie; | |
1219 | desc->async_tx.cookie = 0; | |
e1d181ef | 1220 | ioat_dma_unmap(ioat_chan, desc); |
7bb67c14 SN |
1221 | if (desc->async_tx.callback) { |
1222 | desc->async_tx.callback(desc->async_tx.callback_param); | |
1223 | desc->async_tx.callback = NULL; | |
1224 | } | |
1225 | } | |
1226 | } | |
1227 | ||
1228 | /* move used.prev up beyond those that are finished */ | |
1229 | if (&desc->node == ioat_chan->used_desc.next) | |
1230 | ioat_chan->used_desc.prev = NULL; | |
1231 | else | |
1232 | ioat_chan->used_desc.prev = &desc->node; | |
0bbd5f4e | 1233 | } |
7bb67c14 | 1234 | break; |
0bbd5f4e CL |
1235 | } |
1236 | ||
43d6e369 | 1237 | spin_unlock_bh(&ioat_chan->desc_lock); |
0bbd5f4e | 1238 | |
43d6e369 | 1239 | ioat_chan->last_completion = phys_complete; |
0bbd5f4e | 1240 | if (cookie != 0) |
43d6e369 | 1241 | ioat_chan->completed_cookie = cookie; |
0bbd5f4e | 1242 | |
7f2b291f | 1243 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
0bbd5f4e CL |
1244 | } |
1245 | ||
1246 | /** | |
1247 | * ioat_dma_is_complete - poll the status of an IOAT DMA transaction | |
1248 | * @chan: IOAT DMA channel handle | |
1249 | * @cookie: DMA transaction identifier | |
6508871e RD |
1250 | * @done: if not %NULL, updated with last completed transaction |
1251 | * @used: if not %NULL, updated with last used transaction | |
0bbd5f4e | 1252 | */ |
0bbd5f4e | 1253 | static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, |
43d6e369 SN |
1254 | dma_cookie_t cookie, |
1255 | dma_cookie_t *done, | |
1256 | dma_cookie_t *used) | |
0bbd5f4e CL |
1257 | { |
1258 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | |
1259 | dma_cookie_t last_used; | |
1260 | dma_cookie_t last_complete; | |
1261 | enum dma_status ret; | |
1262 | ||
1263 | last_used = chan->cookie; | |
1264 | last_complete = ioat_chan->completed_cookie; | |
09177e85 | 1265 | ioat_chan->watchdog_tcp_cookie = cookie; |
0bbd5f4e CL |
1266 | |
1267 | if (done) | |
43d6e369 | 1268 | *done = last_complete; |
0bbd5f4e CL |
1269 | if (used) |
1270 | *used = last_used; | |
1271 | ||
1272 | ret = dma_async_is_complete(cookie, last_complete, last_used); | |
1273 | if (ret == DMA_SUCCESS) | |
1274 | return ret; | |
1275 | ||
1276 | ioat_dma_memcpy_cleanup(ioat_chan); | |
1277 | ||
1278 | last_used = chan->cookie; | |
1279 | last_complete = ioat_chan->completed_cookie; | |
1280 | ||
1281 | if (done) | |
43d6e369 | 1282 | *done = last_complete; |
0bbd5f4e CL |
1283 | if (used) |
1284 | *used = last_used; | |
1285 | ||
1286 | return dma_async_is_complete(cookie, last_complete, last_used); | |
1287 | } | |
1288 | ||
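
`ioat_dma_is_complete` is registered as the device's `device_is_tx_complete` hook, so a client normally reaches it through that pointer (as the self-test below does) rather than calling it directly. A hedged polling sketch follows; the busy-wait is kept deliberately simple, and a real caller would bound it with a timeout the way the self-test bounds its completion wait:

```c
/* Sketch only: poll a previously submitted cookie until the engine reports
 * completion, using the same hook this driver wires up. */
static enum dma_status example_wait_for_copy(struct dma_chan *chan,
					     dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = chan->device->device_is_tx_complete(chan, cookie,
							     NULL, NULL);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status;	/* DMA_SUCCESS once the cookie has completed */
}
```
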
43d6e369 | 1289 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) |
0bbd5f4e CL |
1290 | { |
1291 | struct ioat_desc_sw *desc; | |
1292 | ||
1293 | spin_lock_bh(&ioat_chan->desc_lock); | |
1294 | ||
3e037454 | 1295 | desc = ioat_dma_get_next_descriptor(ioat_chan); |
7f1b358a MS |
1296 | |
1297 | if (!desc) { | |
1298 | dev_err(&ioat_chan->device->pdev->dev, | |
1299 | "Unable to start null desc - get next desc failed\n"); | |
1300 | spin_unlock_bh(&ioat_chan->desc_lock); | |
1301 | return; | |
1302 | } | |
1303 | ||
7f2b291f SN |
1304 | desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL |
1305 | | IOAT_DMA_DESCRIPTOR_CTL_INT_GN | |
1306 | | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | |
7f1b358a MS |
1307 | /* set size to non-zero value (channel returns error when size is 0) */ |
1308 | desc->hw->size = NULL_DESC_BUFFER_SIZE; | |
7f2b291f SN |
1309 | desc->hw->src_addr = 0; |
1310 | desc->hw->dst_addr = 0; | |
636bdeaa | 1311 | async_tx_ack(&desc->async_tx); |
7bb67c14 SN |
1312 | switch (ioat_chan->device->version) { |
1313 | case IOAT_VER_1_2: | |
1314 | desc->hw->next = 0; | |
1315 | list_add_tail(&desc->node, &ioat_chan->used_desc); | |
1316 | ||
1317 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | |
1318 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | |
1319 | writel(((u64) desc->async_tx.phys) >> 32, | |
1320 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | |
1321 | ||
1322 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | |
1323 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | |
1324 | break; | |
1325 | case IOAT_VER_2_0: | |
7f1b358a | 1326 | case IOAT_VER_3_0: |
7bb67c14 SN |
1327 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, |
1328 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | |
1329 | writel(((u64) desc->async_tx.phys) >> 32, | |
1330 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | |
1331 | ||
1332 | ioat_chan->dmacount++; | |
1333 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | |
1334 | break; | |
1335 | } | |
0bbd5f4e | 1336 | spin_unlock_bh(&ioat_chan->desc_lock); |
0bbd5f4e CL |
1337 | } |
1338 | ||
1339 | /* | |
1340 | * Perform an IOAT transaction to verify the HW works. | |
1341 | */ | |
1342 | #define IOAT_TEST_SIZE 2000 | |
1343 | ||
95218430 SN |
1344 | static void ioat_dma_test_callback(void *dma_async_param) |
1345 | { | |
b9bdcbba DW |
1346 | struct completion *cmp = dma_async_param; |
1347 | ||
1348 | complete(cmp); | |
95218430 SN |
1349 | } |
1350 | ||
3e037454 SN |
1351 | /** |
1352 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | |
1353 | * @device: device to be tested | |
1354 | */ | |
1355 | static int ioat_dma_self_test(struct ioatdma_device *device) | |
0bbd5f4e CL |
1356 | { |
1357 | int i; | |
1358 | u8 *src; | |
1359 | u8 *dest; | |
1360 | struct dma_chan *dma_chan; | |
711924b1 | 1361 | struct dma_async_tx_descriptor *tx; |
0036731c | 1362 | dma_addr_t dma_dest, dma_src; |
0bbd5f4e CL |
1363 | dma_cookie_t cookie; |
1364 | int err = 0; | |
b9bdcbba | 1365 | struct completion cmp; |
0c33e1ca | 1366 | unsigned long tmo; |
0bbd5f4e | 1367 | |
e94b1766 | 1368 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); |
0bbd5f4e CL |
1369 | if (!src) |
1370 | return -ENOMEM; | |
e94b1766 | 1371 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); |
0bbd5f4e CL |
1372 | if (!dest) { |
1373 | kfree(src); | |
1374 | return -ENOMEM; | |
1375 | } | |
1376 | ||
1377 | /* Fill in src buffer */ | |
1378 | for (i = 0; i < IOAT_TEST_SIZE; i++) | |
1379 | src[i] = (u8)i; | |
1380 | ||
1381 | /* Start copy, using first DMA channel */ | |
1382 | dma_chan = container_of(device->common.channels.next, | |
43d6e369 SN |
1383 | struct dma_chan, |
1384 | device_node); | |
aa1e6f1a | 1385 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { |
43d6e369 SN |
1386 | dev_err(&device->pdev->dev, |
1387 | "selftest cannot allocate chan resource\n"); | |
0bbd5f4e CL |
1388 | err = -ENODEV; |
1389 | goto out; | |
1390 | } | |
1391 | ||
0036731c DW |
1392 | dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, |
1393 | DMA_TO_DEVICE); | |
1394 | dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, | |
1395 | DMA_FROM_DEVICE); | |
1396 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | |
1397 | IOAT_TEST_SIZE, 0); | |
5149fd01 SN |
1398 | if (!tx) { |
1399 | dev_err(&device->pdev->dev, | |
1400 | "Self-test prep failed, disabling\n"); | |
1401 | err = -ENODEV; | |
1402 | goto free_resources; | |
1403 | } | |
1404 | ||
7405f74b | 1405 | async_tx_ack(tx); |
b9bdcbba | 1406 | init_completion(&cmp); |
95218430 | 1407 | tx->callback = ioat_dma_test_callback; |
b9bdcbba | 1408 | tx->callback_param = &cmp; |
7bb67c14 | 1409 | cookie = tx->tx_submit(tx); |
7f2b291f SN |
1410 | if (cookie < 0) { |
1411 | dev_err(&device->pdev->dev, | |
1412 | "Self-test setup failed, disabling\n"); | |
1413 | err = -ENODEV; | |
1414 | goto free_resources; | |
1415 | } | |
7bb67c14 | 1416 | device->common.device_issue_pending(dma_chan); |
532d3b1f | 1417 | |
0c33e1ca | 1418 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
0bbd5f4e | 1419 | |
0c33e1ca DW |
1420 | if (tmo == 0 || |
1421 | device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | |
7bb67c14 | 1422 | != DMA_SUCCESS) { |
43d6e369 | 1423 | dev_err(&device->pdev->dev, |
5149fd01 | 1424 | "Self-test copy timed out, disabling\n"); |
0bbd5f4e CL |
1425 | err = -ENODEV; |
1426 | goto free_resources; | |
1427 | } | |
1428 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | |
43d6e369 | 1429 | dev_err(&device->pdev->dev, |
5149fd01 | 1430 | "Self-test copy failed compare, disabling\n"); |
0bbd5f4e CL |
1431 | err = -ENODEV; |
1432 | goto free_resources; | |
1433 | } | |
1434 | ||
1435 | free_resources: | |
7bb67c14 | 1436 | device->common.device_free_chan_resources(dma_chan); |
0bbd5f4e CL |
1437 | out: |
1438 | kfree(src); | |
1439 | kfree(dest); | |
1440 | return err; | |
1441 | } | |
1442 | ||
3e037454 SN |
1443 | static char ioat_interrupt_style[32] = "msix"; |
1444 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | |
1445 | sizeof(ioat_interrupt_style), 0644); | |
1446 | MODULE_PARM_DESC(ioat_interrupt_style, | |
1447 | "set ioat interrupt style: msix (default), " | |
1448 | "msix-single-vector, msi, intx)"); | |
1449 | ||
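/*
 * Interrupt setup starts at the requested style and falls back in
 * order: MSI-X (one vector per channel) -> MSI-X with a single shared
 * vector -> MSI -> legacy INTx.  Whichever mode succeeds is recorded
 * in device->irq_mode so ioat_dma_remove_interrupts() can undo it.
 */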
1450 | /** | |
1451 | * ioat_dma_setup_interrupts - set up the interrupt handler | |
1452 | * @device: ioat device | |
1453 | */ | |
1454 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | |
1455 | { | |
1456 | struct ioat_dma_chan *ioat_chan; | |
1457 | int err, i, j, msixcnt; | |
1458 | u8 intrctrl = 0; | |
1459 | ||
1460 | if (!strcmp(ioat_interrupt_style, "msix")) | |
1461 | goto msix; | |
1462 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | |
1463 | goto msix_single_vector; | |
1464 | if (!strcmp(ioat_interrupt_style, "msi")) | |
1465 | goto msi; | |
1466 | if (!strcmp(ioat_interrupt_style, "intx")) | |
1467 | goto intx; | |
5149fd01 SN |
1468 | dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", |
1469 | ioat_interrupt_style); | |
1470 | goto err_no_irq; | |
3e037454 SN |
1471 | |
1472 | msix: | |
1473 | /* The number of MSI-X vectors should equal the number of channels */ | |
1474 | msixcnt = device->common.chancnt; | |
1475 | for (i = 0; i < msixcnt; i++) | |
1476 | device->msix_entries[i].entry = i; | |
1477 | ||
1478 | err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); | |
1479 | if (err < 0) | |
1480 | goto msi; | |
1481 | if (err > 0) | |
1482 | goto msix_single_vector; | |
1483 | ||
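/*
 * Request one vector per channel; if any request_irq() fails, free the
 * vectors already requested and fall back to a single shared vector.
 */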
1484 | for (i = 0; i < msixcnt; i++) { | |
1485 | ioat_chan = ioat_lookup_chan_by_index(device, i); | |
1486 | err = request_irq(device->msix_entries[i].vector, | |
1487 | ioat_dma_do_interrupt_msix, | |
1488 | 0, "ioat-msix", ioat_chan); | |
1489 | if (err) { | |
1490 | for (j = 0; j < i; j++) { | |
1491 | ioat_chan = | |
1492 | ioat_lookup_chan_by_index(device, j); | |
1493 | free_irq(device->msix_entries[j].vector, | |
1494 | ioat_chan); | |
1495 | } | |
1496 | goto msix_single_vector; | |
1497 | } | |
1498 | } | |
1499 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | |
1500 | device->irq_mode = msix_multi_vector; | |
1501 | goto done; | |
1502 | ||
1503 | msix_single_vector: | |
1504 | device->msix_entries[0].entry = 0; | |
1505 | err = pci_enable_msix(device->pdev, device->msix_entries, 1); | |
1506 | if (err) | |
1507 | goto msi; | |
1508 | ||
1509 | err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, | |
1510 | 0, "ioat-msix", device); | |
1511 | if (err) { | |
1512 | pci_disable_msix(device->pdev); | |
1513 | goto msi; | |
1514 | } | |
1515 | device->irq_mode = msix_single_vector; | |
1516 | goto done; | |
1517 | ||
1518 | msi: | |
1519 | err = pci_enable_msi(device->pdev); | |
1520 | if (err) | |
1521 | goto intx; | |
1522 | ||
1523 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | |
1524 | 0, "ioat-msi", device); | |
1525 | if (err) { | |
1526 | pci_disable_msi(device->pdev); | |
1527 | goto intx; | |
1528 | } | |
1529 | /* | |
1530 | * CB 1.2 devices need a bit set in configuration space to enable MSI | |
1531 | */ | |
1532 | if (device->version == IOAT_VER_1_2) { | |
1533 | u32 dmactrl; | |
1534 | pci_read_config_dword(device->pdev, | |
1535 | IOAT_PCI_DMACTRL_OFFSET, &dmactrl); | |
1536 | dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; | |
1537 | pci_write_config_dword(device->pdev, | |
1538 | IOAT_PCI_DMACTRL_OFFSET, dmactrl); | |
1539 | } | |
1540 | device->irq_mode = msi; | |
1541 | goto done; | |
1542 | ||
1543 | intx: | |
1544 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | |
1545 | IRQF_SHARED, "ioat-intx", device); | |
1546 | if (err) | |
1547 | goto err_no_irq; | |
1548 | device->irq_mode = intx; | |
1549 | ||
1550 | done: | |
1551 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | |
1552 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | |
1553 | return 0; | |
1554 | ||
1555 | err_no_irq: | |
1556 | /* Disable all interrupt generation */ | |
1557 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | |
1558 | dev_err(&device->pdev->dev, "no usable interrupts\n"); | |
1559 | device->irq_mode = none; | |
1560 | return -1; | |
1561 | } | |
1562 | ||
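/*
 * Teardown mirrors ioat_dma_setup_interrupts(): master interrupt
 * generation is disabled first, then each irq_mode case frees exactly
 * the vectors that were requested and disables MSI/MSI-X as needed.
 */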
1563 | /** | |
1564 | * ioat_dma_remove_interrupts - tear down whichever interrupt mode was set up | |
1565 | * @device: ioat device | |
1566 | */ | |
1567 | static void ioat_dma_remove_interrupts(struct ioatdma_device *device) | |
1568 | { | |
1569 | struct ioat_dma_chan *ioat_chan; | |
1570 | int i; | |
1571 | ||
1572 | /* Disable all interrupt generation */ | |
1573 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | |
1574 | ||
1575 | switch (device->irq_mode) { | |
1576 | case msix_multi_vector: | |
1577 | for (i = 0; i < device->common.chancnt; i++) { | |
1578 | ioat_chan = ioat_lookup_chan_by_index(device, i); | |
1579 | free_irq(device->msix_entries[i].vector, ioat_chan); | |
1580 | } | |
1581 | pci_disable_msix(device->pdev); | |
1582 | break; | |
1583 | case msix_single_vector: | |
1584 | free_irq(device->msix_entries[0].vector, device); | |
1585 | pci_disable_msix(device->pdev); | |
1586 | break; | |
1587 | case msi: | |
1588 | free_irq(device->pdev->irq, device); | |
1589 | pci_disable_msi(device->pdev); | |
1590 | break; | |
1591 | case intx: | |
1592 | free_irq(device->pdev->irq, device); | |
1593 | break; | |
1594 | case none: | |
1595 | dev_warn(&device->pdev->dev, | |
1596 | "call to %s without interrupts setup\n", __func__); | |
1597 | } | |
1598 | device->irq_mode = none; | |
1599 | } | |
1600 | ||
8ab89567 SN |
1601 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, |
1602 | void __iomem *iobase) | |
0bbd5f4e CL |
1603 | { |
1604 | int err; | |
8ab89567 | 1605 | struct ioatdma_device *device; |
0bbd5f4e CL |
1606 | |
1607 | device = kzalloc(sizeof(*device), GFP_KERNEL); | |
1608 | if (!device) { | |
1609 | err = -ENOMEM; | |
1610 | goto err_kzalloc; | |
1611 | } | |
8ab89567 SN |
1612 | device->pdev = pdev; |
1613 | device->reg_base = iobase; | |
1614 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | |
0bbd5f4e CL |
1615 | |
1616 | /* DMA coherent memory pool for DMA descriptor allocations */ | |
1617 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | |
8ab89567 SN |
1618 | sizeof(struct ioat_dma_descriptor), |
1619 | 64, 0); | |
0bbd5f4e CL |
1620 | if (!device->dma_pool) { |
1621 | err = -ENOMEM; | |
1622 | goto err_dma_pool; | |
1623 | } | |
1624 | ||
43d6e369 SN |
1625 | device->completion_pool = pci_pool_create("completion_pool", pdev, |
1626 | sizeof(u64), SMP_CACHE_BYTES, | |
1627 | SMP_CACHE_BYTES); | |
0bbd5f4e CL |
1628 | if (!device->completion_pool) { |
1629 | err = -ENOMEM; | |
1630 | goto err_completion_pool; | |
1631 | } | |
1632 | ||
0bbd5f4e | 1633 | INIT_LIST_HEAD(&device->common.channels); |
43d6e369 | 1634 | ioat_dma_enumerate_channels(device); |
0bbd5f4e | 1635 | |
43d6e369 SN |
1636 | device->common.device_alloc_chan_resources = |
1637 | ioat_dma_alloc_chan_resources; | |
1638 | device->common.device_free_chan_resources = | |
1639 | ioat_dma_free_chan_resources; | |
7bb67c14 SN |
1640 | device->common.dev = &pdev->dev; |
1641 | ||
1642 | dma_cap_set(DMA_MEMCPY, device->common.cap_mask); | |
7405f74b | 1643 | device->common.device_is_tx_complete = ioat_dma_is_complete; |
7bb67c14 SN |
1644 | switch (device->version) { |
1645 | case IOAT_VER_1_2: | |
1646 | device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | |
1647 | device->common.device_issue_pending = | |
1648 | ioat1_dma_memcpy_issue_pending; | |
1649 | break; | |
1650 | case IOAT_VER_2_0: | |
7f1b358a | 1651 | case IOAT_VER_3_0: |
7bb67c14 SN |
1652 | device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; |
1653 | device->common.device_issue_pending = | |
1654 | ioat2_dma_memcpy_issue_pending; | |
1655 | break; | |
1656 | } | |
1657 | ||
3e037454 | 1658 | dev_info(&device->pdev->dev, |
5149fd01 SN |
1659 | "Intel(R) I/OAT DMA Engine found," |
1660 | " %d channels, device version 0x%02x, driver version %s\n", | |
1661 | device->common.chancnt, device->version, IOAT_DMA_VERSION); | |
8ab89567 | 1662 | |
8b794b14 MS |
1663 | if (!device->common.chancnt) { |
1664 | dev_err(&device->pdev->dev, | |
1665 | "Intel(R) I/OAT DMA Engine problem found: " | |
1666 | "zero channels detected\n"); | |
1667 | goto err_setup_interrupts; | |
1668 | } | |
1669 | ||
3e037454 | 1670 | err = ioat_dma_setup_interrupts(device); |
8ab89567 | 1671 | if (err) |
3e037454 | 1672 | goto err_setup_interrupts; |
0bbd5f4e | 1673 | |
3e037454 | 1674 | err = ioat_dma_self_test(device); |
0bbd5f4e CL |
1675 | if (err) |
1676 | goto err_self_test; | |
1677 | ||
16a37aca MS |
1678 | ioat_set_tcp_copy_break(device); |
1679 | ||
0bbd5f4e CL |
1680 | dma_async_device_register(&device->common); |
1681 | ||
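/*
 * The channel watchdog is only scheduled for pre-3.0 hardware;
 * ioat_dma_remove() cancels it under the same version check.
 */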
7f1b358a MS |
1682 | if (device->version != IOAT_VER_3_0) { |
1683 | INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); | |
1684 | schedule_delayed_work(&device->work, | |
1685 | WATCHDOG_DELAY); | |
1686 | } | |
09177e85 | 1687 | |
8ab89567 | 1688 | return device; |
0bbd5f4e CL |
1689 | |
1690 | err_self_test: | |
3e037454 SN |
1691 | ioat_dma_remove_interrupts(device); |
1692 | err_setup_interrupts: | |
0bbd5f4e CL |
1693 | pci_pool_destroy(device->completion_pool); |
1694 | err_completion_pool: | |
1695 | pci_pool_destroy(device->dma_pool); | |
1696 | err_dma_pool: | |
1697 | kfree(device); | |
1698 | err_kzalloc: | |
bb8e8bcc | 1699 | dev_err(&pdev->dev, |
5149fd01 | 1700 | "Intel(R) I/OAT DMA Engine initialization failed\n"); |
8ab89567 | 1701 | return NULL; |
428ed602 DA |
1702 | } |
1703 | ||
8ab89567 | 1704 | void ioat_dma_remove(struct ioatdma_device *device) |
0bbd5f4e | 1705 | { |
0bbd5f4e CL |
1706 | struct dma_chan *chan, *_chan; |
1707 | struct ioat_dma_chan *ioat_chan; | |
1708 | ||
2b8a6bf8 MS |
1709 | if (device->version != IOAT_VER_3_0) |
1710 | cancel_delayed_work(&device->work); | |
1711 | ||
3e037454 | 1712 | ioat_dma_remove_interrupts(device); |
8ab89567 | 1713 | |
dfe2299e SN |
1714 | dma_async_device_unregister(&device->common); |
1715 | ||
0bbd5f4e CL |
1716 | pci_pool_destroy(device->dma_pool); |
1717 | pci_pool_destroy(device->completion_pool); | |
8ab89567 | 1718 | |
7df7cf06 SN |
1719 | iounmap(device->reg_base); |
1720 | pci_release_regions(device->pdev); | |
1721 | pci_disable_device(device->pdev); | |
1722 | ||
43d6e369 SN |
1723 | list_for_each_entry_safe(chan, _chan, |
1724 | &device->common.channels, device_node) { | |
0bbd5f4e CL |
1725 | ioat_chan = to_ioat_chan(chan); |
1726 | list_del(&chan->device_node); | |
1727 | kfree(ioat_chan); | |
1728 | } | |
1729 | kfree(device); | |
1730 | } | |
1731 |