dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
[deliverable/linux.git] drivers/dma/ioat_dma.c
1 /*
2 * Intel I/OAT DMA Linux driver
3 * Copyright(c) 2004 - 2007 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 */
22
23 /*
24 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25 * copy operations.
26 */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include "ioatdma.h"
36 #include "ioatdma_registers.h"
37 #include "ioatdma_hw.h"
38
39 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
40 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
41 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43
44 static int ioat_pending_level = 4;
45 module_param(ioat_pending_level, int, 0644);
46 MODULE_PARM_DESC(ioat_pending_level,
47 "high-water mark for pushing ioat descriptors (default: 4)");
48
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
52
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
57
58 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
59 struct ioatdma_device *device,
60 int index)
61 {
62 return device->idx[index];
63 }
64
65 /**
66 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
67 * @irq: interrupt id
68 * @data: interrupt data
69 */
70 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
71 {
72 struct ioatdma_device *instance = data;
73 struct ioat_dma_chan *ioat_chan;
74 unsigned long attnstatus;
75 int bit;
76 u8 intrctrl;
77
78 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
79
80 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
81 return IRQ_NONE;
82
83 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
84 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
85 return IRQ_NONE;
86 }
87
88 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
89 for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
90 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
91 tasklet_schedule(&ioat_chan->cleanup_task);
92 }
93
94 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
95 return IRQ_HANDLED;
96 }
97
98 /**
99 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
100 * @irq: interrupt id
101 * @data: interrupt data
102 */
103 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
104 {
105 struct ioat_dma_chan *ioat_chan = data;
106
107 tasklet_schedule(&ioat_chan->cleanup_task);
108
109 return IRQ_HANDLED;
110 }
111
112 static void ioat_dma_cleanup_tasklet(unsigned long data);
113
114 /**
115 * ioat_dma_enumerate_channels - find and initialize the device's channels
116 * @device: the device to be enumerated
117 */
118 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
119 {
120 u8 xfercap_scale;
121 u32 xfercap;
122 int i;
123 struct ioat_dma_chan *ioat_chan;
124
125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
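/*
 * The XFERCAP register gives the per-descriptor transfer limit as a power
 * of two; a scale of zero means the hardware maximum, which is stored
 * here as all ones.
 */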
127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
128
129 for (i = 0; i < device->common.chancnt; i++) {
130 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
131 if (!ioat_chan) {
132 device->common.chancnt = i;
133 break;
134 }
135
136 ioat_chan->device = device;
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap;
139 ioat_chan->desccount = 0;
140 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU,
143 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
144 }
145 spin_lock_init(&ioat_chan->cleanup_lock);
146 spin_lock_init(&ioat_chan->desc_lock);
147 INIT_LIST_HEAD(&ioat_chan->free_desc);
148 INIT_LIST_HEAD(&ioat_chan->used_desc);
149 /* This should be made common somewhere in dmaengine.c */
150 ioat_chan->common.device = &device->common;
151 list_add_tail(&ioat_chan->common.device_node,
152 &device->common.channels);
153 device->idx[i] = ioat_chan;
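/*
 * The cleanup tasklet is created here but left disabled; it is enabled
 * once the channel's descriptor resources have been allocated in
 * ioat_dma_alloc_chan_resources().
 */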
154 tasklet_init(&ioat_chan->cleanup_task,
155 ioat_dma_cleanup_tasklet,
156 (unsigned long) ioat_chan);
157 tasklet_disable(&ioat_chan->cleanup_task);
158 }
159 return device->common.chancnt;
160 }
161
162 /**
163 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
164 * descriptors to hw
165 * @ioat_chan: IOAT DMA channel handle
166 */
167 static inline void __ioat1_dma_memcpy_issue_pending(
168 struct ioat_dma_chan *ioat_chan)
169 {
170 ioat_chan->pending = 0;
171 writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
172 }
173
174 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175 {
176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177
178 if (ioat_chan->pending != 0) {
179 spin_lock_bh(&ioat_chan->desc_lock);
180 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181 spin_unlock_bh(&ioat_chan->desc_lock);
182 }
183 }
184
185 static inline void __ioat2_dma_memcpy_issue_pending(
186 struct ioat_dma_chan *ioat_chan)
187 {
188 ioat_chan->pending = 0;
189 writew(ioat_chan->dmacount,
190 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
191 }
192
193 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194 {
195 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196
197 if (ioat_chan->pending != 0) {
198 spin_lock_bh(&ioat_chan->desc_lock);
199 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200 spin_unlock_bh(&ioat_chan->desc_lock);
201 }
202 }
203
204 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205 {
206 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
207 struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
208 struct ioat_desc_sw *prev, *new;
209 struct ioat_dma_descriptor *hw;
210 dma_cookie_t cookie;
211 LIST_HEAD(new_chain);
212 u32 copy;
213 size_t len;
214 dma_addr_t src, dst;
215 unsigned long orig_flags;
216 unsigned int desc_count = 0;
217
218 /* src, dest, and len are stored in the initial descriptor */
219 len = first->len;
220 src = first->src;
221 dst = first->dst;
222 orig_flags = first->async_tx.flags;
223 new = first;
224
225 spin_lock_bh(&ioat_chan->desc_lock);
226 prev = to_ioat_desc(ioat_chan->used_desc.prev);
227 prefetch(prev->hw);
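/*
 * Split the request into hardware descriptors of at most xfercap bytes
 * each; the descriptors are chained by physical address and spliced onto
 * the channel's used list below.
 */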
228 do {
229 copy = min_t(size_t, len, ioat_chan->xfercap);
230
231 async_tx_ack(&new->async_tx);
232
233 hw = new->hw;
234 hw->size = copy;
235 hw->ctl = 0;
236 hw->src_addr = src;
237 hw->dst_addr = dst;
238 hw->next = 0;
239
240 /* chain together the physical address list for the HW */
241 wmb();
242 prev->hw->next = (u64) new->async_tx.phys;
243
244 len -= copy;
245 dst += copy;
246 src += copy;
247
248 list_add_tail(&new->node, &new_chain);
249 desc_count++;
250 prev = new;
251 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252
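/*
 * The last descriptor requests a completion-status writeback; if the
 * client supplied a callback, also generate an interrupt and move the
 * callback so it fires once, on the final descriptor of the chain.
 */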
253 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254 if (new->async_tx.callback) {
255 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
256 if (first != new) {
257 /* move callback into the last desc */
258 new->async_tx.callback = first->async_tx.callback;
259 new->async_tx.callback_param
260 = first->async_tx.callback_param;
261 first->async_tx.callback = NULL;
262 first->async_tx.callback_param = NULL;
263 }
264 }
265
266 new->tx_cnt = desc_count;
267 new->async_tx.flags = orig_flags; /* client is in control of this ack */
268
269 /* store the original values for use in later cleanup */
270 if (new != first) {
271 new->src = first->src;
272 new->dst = first->dst;
273 new->len = first->len;
274 }
275
276 /* cookie incr and addition to used_list must be atomic */
277 cookie = ioat_chan->common.cookie;
278 cookie++;
279 if (cookie < 0)
280 cookie = 1;
281 ioat_chan->common.cookie = new->async_tx.cookie = cookie;
282
283 /* write address into NextDescriptor field of last desc in chain */
284 to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
285 first->async_tx.phys;
286 __list_splice(&new_chain, ioat_chan->used_desc.prev);
287
288 ioat_chan->dmacount += desc_count;
289 ioat_chan->pending += desc_count;
290 if (ioat_chan->pending >= ioat_pending_level)
291 __ioat1_dma_memcpy_issue_pending(ioat_chan);
292 spin_unlock_bh(&ioat_chan->desc_lock);
293
294 return cookie;
295 }
296
297 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
298 {
299 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
300 struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
301 struct ioat_desc_sw *new;
302 struct ioat_dma_descriptor *hw;
303 dma_cookie_t cookie;
304 u32 copy;
305 size_t len;
306 dma_addr_t src, dst;
307 unsigned long orig_flags;
308 unsigned int desc_count = 0;
309
310 /* src, dest, and len are stored in the initial descriptor */
311 len = first->len;
312 src = first->src;
313 dst = first->dst;
314 orig_flags = first->async_tx.flags;
315 new = first;
316
317 /*
318 * ioat_chan->desc_lock is still held in the version 2 path;
319 * it gets unlocked at the end of this function
320 */
321 do {
322 copy = min_t(size_t, len, ioat_chan->xfercap);
323
324 async_tx_ack(&new->async_tx);
325
326 hw = new->hw;
327 hw->size = copy;
328 hw->ctl = 0;
329 hw->src_addr = src;
330 hw->dst_addr = dst;
331
332 len -= copy;
333 dst += copy;
334 src += copy;
335 desc_count++;
336 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337
338 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339 if (new->async_tx.callback) {
340 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341 if (first != new) {
342 /* move callback into the last desc */
343 new->async_tx.callback = first->async_tx.callback;
344 new->async_tx.callback_param
345 = first->async_tx.callback_param;
346 first->async_tx.callback = NULL;
347 first->async_tx.callback_param = NULL;
348 }
349 }
350
351 new->tx_cnt = desc_count;
352 new->async_tx.flags = orig_flags; /* client is in control of this ack */
353
354 /* store the original values for use in later cleanup */
355 if (new != first) {
356 new->src = first->src;
357 new->dst = first->dst;
358 new->len = first->len;
359 }
360
361 /* cookie incr and addition to used_list must be atomic */
362 cookie = ioat_chan->common.cookie;
363 cookie++;
364 if (cookie < 0)
365 cookie = 1;
366 ioat_chan->common.cookie = new->async_tx.cookie = cookie;
367
368 ioat_chan->dmacount += desc_count;
369 ioat_chan->pending += desc_count;
370 if (ioat_chan->pending >= ioat_pending_level)
371 __ioat2_dma_memcpy_issue_pending(ioat_chan);
372 spin_unlock_bh(&ioat_chan->desc_lock);
373
374 return cookie;
375 }
376
377 /**
378 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
379 * @ioat_chan: the channel supplying the memory pool for the descriptors
380 * @flags: allocation flags
381 */
382 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
383 struct ioat_dma_chan *ioat_chan,
384 gfp_t flags)
385 {
386 struct ioat_dma_descriptor *desc;
387 struct ioat_desc_sw *desc_sw;
388 struct ioatdma_device *ioatdma_device;
389 dma_addr_t phys;
390
391 ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
392 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
393 if (unlikely(!desc))
394 return NULL;
395
396 desc_sw = kzalloc(sizeof(*desc_sw), flags);
397 if (unlikely(!desc_sw)) {
398 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
399 return NULL;
400 }
401
402 memset(desc, 0, sizeof(*desc));
403 dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
404 switch (ioat_chan->device->version) {
405 case IOAT_VER_1_2:
406 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407 break;
408 case IOAT_VER_2_0:
409 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410 break;
411 }
412 INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
413
414 desc_sw->hw = desc;
415 desc_sw->async_tx.phys = phys;
416
417 return desc_sw;
418 }
419
420 static int ioat_initial_desc_count = 256;
421 module_param(ioat_initial_desc_count, int, 0644);
422 MODULE_PARM_DESC(ioat_initial_desc_count,
423 "initial descriptors per channel (default: 256)");
424
425 /**
426 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
427 * @ioat_chan: the channel to be massaged
428 */
429 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
430 {
431 struct ioat_desc_sw *desc, *_desc;
432
433 /* setup used_desc */
434 ioat_chan->used_desc.next = ioat_chan->free_desc.next;
435 ioat_chan->used_desc.prev = NULL;
436
437 /* pull free_desc out of the circle so that every node is a hw
438 * descriptor, but leave it pointing to the list
439 */
440 ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
441 ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
442
443 /* circle link the hw descriptors */
444 desc = to_ioat_desc(ioat_chan->free_desc.next);
445 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
446 list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
447 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
448 }
449 }
450
451 /**
452 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453 * @chan: the channel to be filled out
454 */
455 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
456 struct dma_client *client)
457 {
458 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
459 struct ioat_desc_sw *desc;
460 u16 chanctrl;
461 u32 chanerr;
462 int i;
463 LIST_HEAD(tmp_list);
464
465 /* have we already been set up? */
466 if (!list_empty(&ioat_chan->free_desc))
467 return ioat_chan->desccount;
468
469 /* Setup register to interrupt and write completion status on error */
470 chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
471 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
472 IOAT_CHANCTRL_ERR_COMPLETION_EN;
473 writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
474
475 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
476 if (chanerr) {
477 dev_err(&ioat_chan->device->pdev->dev,
478 "CHANERR = %x, clearing\n", chanerr);
479 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
480 }
481
482 /* Allocate descriptors */
483 for (i = 0; i < ioat_initial_desc_count; i++) {
484 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
485 if (!desc) {
486 dev_err(&ioat_chan->device->pdev->dev,
487 "Only %d initial descriptors\n", i);
488 break;
489 }
490 list_add_tail(&desc->node, &tmp_list);
491 }
492 spin_lock_bh(&ioat_chan->desc_lock);
493 ioat_chan->desccount = i;
494 list_splice(&tmp_list, &ioat_chan->free_desc);
495 if (ioat_chan->device->version != IOAT_VER_1_2)
496 ioat2_dma_massage_chan_desc(ioat_chan);
497 spin_unlock_bh(&ioat_chan->desc_lock);
498
499 /* allocate a completion writeback area */
500 /* doing two 32-bit writes to mmio since one 64-bit write doesn't work */
501 ioat_chan->completion_virt =
502 pci_pool_alloc(ioat_chan->device->completion_pool,
503 GFP_KERNEL,
504 &ioat_chan->completion_addr);
505 memset(ioat_chan->completion_virt, 0,
506 sizeof(*ioat_chan->completion_virt));
507 writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
508 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
509 writel(((u64) ioat_chan->completion_addr) >> 32,
510 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
511
512 tasklet_enable(&ioat_chan->cleanup_task);
513 ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
514 return ioat_chan->desccount;
515 }
516
517 /**
518 * ioat_dma_free_chan_resources - release all the descriptors
519 * @chan: the channel to be cleaned
520 */
521 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
522 {
523 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
524 struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
525 struct ioat_desc_sw *desc, *_desc;
526 int in_use_descs = 0;
527
528 tasklet_disable(&ioat_chan->cleanup_task);
529 ioat_dma_memcpy_cleanup(ioat_chan);
530
531 /* Delay 100ms after reset to allow internal DMA logic to quiesce
532 * before removing DMA descriptor resources.
533 */
534 writeb(IOAT_CHANCMD_RESET,
535 ioat_chan->reg_base
536 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
537 mdelay(100);
538
539 spin_lock_bh(&ioat_chan->desc_lock);
540 switch (ioat_chan->device->version) {
541 case IOAT_VER_1_2:
542 list_for_each_entry_safe(desc, _desc,
543 &ioat_chan->used_desc, node) {
544 in_use_descs++;
545 list_del(&desc->node);
546 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
547 desc->async_tx.phys);
548 kfree(desc);
549 }
550 list_for_each_entry_safe(desc, _desc,
551 &ioat_chan->free_desc, node) {
552 list_del(&desc->node);
553 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
554 desc->async_tx.phys);
555 kfree(desc);
556 }
557 break;
558 case IOAT_VER_2_0:
559 list_for_each_entry_safe(desc, _desc,
560 ioat_chan->free_desc.next, node) {
561 list_del(&desc->node);
562 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
563 desc->async_tx.phys);
564 kfree(desc);
565 }
566 desc = to_ioat_desc(ioat_chan->free_desc.next);
567 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
568 desc->async_tx.phys);
569 kfree(desc);
570 INIT_LIST_HEAD(&ioat_chan->free_desc);
571 INIT_LIST_HEAD(&ioat_chan->used_desc);
572 break;
573 }
574 spin_unlock_bh(&ioat_chan->desc_lock);
575
576 pci_pool_free(ioatdma_device->completion_pool,
577 ioat_chan->completion_virt,
578 ioat_chan->completion_addr);
579
580 /* one is ok since we left it there on purpose */
581 if (in_use_descs > 1)
582 dev_err(&ioat_chan->device->pdev->dev,
583 "Freeing %d in use descriptors!\n",
584 in_use_descs - 1);
585
586 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
587 ioat_chan->pending = 0;
588 ioat_chan->dmacount = 0;
589 }
590
591 /**
592 * ioat_dma_get_next_descriptor - return the next available descriptor
593 * @ioat_chan: IOAT DMA channel handle
594 *
595 * Gets the next descriptor from the chain, and must be called with the
596 * channel's desc_lock held. Allocates more descriptors if the channel
597 * has run out.
598 */
599 static struct ioat_desc_sw *
600 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
601 {
602 struct ioat_desc_sw *new;
603
604 if (!list_empty(&ioat_chan->free_desc)) {
605 new = to_ioat_desc(ioat_chan->free_desc.next);
606 list_del(&new->node);
607 } else {
608 /* try to get another desc */
609 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
610 if (!new) {
611 dev_err(&ioat_chan->device->pdev->dev,
612 "alloc failed\n");
613 return NULL;
614 }
615 }
616
617 prefetch(new->hw);
618 return new;
619 }
620
621 static struct ioat_desc_sw *
622 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
623 {
624 struct ioat_desc_sw *new;
625
626 /*
627 * used.prev points to where to start processing
628 * used.next points to next free descriptor
629 * if used.prev == NULL, there are none waiting to be processed
630 * if used.next == used.prev.prev, there is only one free descriptor,
631 * and we need to use it as a noop descriptor before
632 * linking in a new set of descriptors, since the device
633 * has probably already read the pointer to it
634 */
635 if (ioat_chan->used_desc.prev &&
636 ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
637
638 struct ioat_desc_sw *desc;
639 struct ioat_desc_sw *noop_desc;
640 int i;
641
642 /* set up the noop descriptor */
643 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
644 noop_desc->hw->size = 0;
645 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
646 noop_desc->hw->src_addr = 0;
647 noop_desc->hw->dst_addr = 0;
648
649 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
650 ioat_chan->pending++;
651 ioat_chan->dmacount++;
652
653 /* try to get a few more descriptors */
654 for (i = 16; i; i--) {
655 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
656 if (!desc) {
657 dev_err(&ioat_chan->device->pdev->dev,
658 "alloc failed\n");
659 break;
660 }
661 list_add_tail(&desc->node, ioat_chan->used_desc.next);
662
663 desc->hw->next
664 = to_ioat_desc(desc->node.next)->async_tx.phys;
665 to_ioat_desc(desc->node.prev)->hw->next
666 = desc->async_tx.phys;
667 ioat_chan->desccount++;
668 }
669
670 ioat_chan->used_desc.next = noop_desc->node.next;
671 }
672 new = to_ioat_desc(ioat_chan->used_desc.next);
673 prefetch(new);
674 ioat_chan->used_desc.next = new->node.next;
675
676 if (ioat_chan->used_desc.prev == NULL)
677 ioat_chan->used_desc.prev = &new->node;
678
679 prefetch(new->hw);
680 return new;
681 }
682
683 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
684 struct ioat_dma_chan *ioat_chan)
685 {
686 if (!ioat_chan)
687 return NULL;
688
689 switch (ioat_chan->device->version) {
690 case IOAT_VER_1_2:
691 return ioat1_dma_get_next_descriptor(ioat_chan);
692 break;
693 case IOAT_VER_2_0:
694 return ioat2_dma_get_next_descriptor(ioat_chan);
695 break;
696 }
697 return NULL;
698 }
699
700 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
701 struct dma_chan *chan,
702 dma_addr_t dma_dest,
703 dma_addr_t dma_src,
704 size_t len,
705 unsigned long flags)
706 {
707 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
708 struct ioat_desc_sw *new;
709
710 spin_lock_bh(&ioat_chan->desc_lock);
711 new = ioat_dma_get_next_descriptor(ioat_chan);
712 spin_unlock_bh(&ioat_chan->desc_lock);
713
714 if (new) {
715 new->len = len;
716 new->dst = dma_dest;
717 new->src = dma_src;
718 new->async_tx.flags = flags;
719 return &new->async_tx;
720 } else
721 return NULL;
722 }
723
724 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
725 struct dma_chan *chan,
726 dma_addr_t dma_dest,
727 dma_addr_t dma_src,
728 size_t len,
729 unsigned long flags)
730 {
731 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
732 struct ioat_desc_sw *new;
733
734 spin_lock_bh(&ioat_chan->desc_lock);
735 new = ioat2_dma_get_next_descriptor(ioat_chan);
736
737 /*
738 * leave ioat_chan->desc_lock held in the ioat 2 path;
739 * it will get unlocked at the end of tx_submit
740 */
741
742 if (new) {
743 new->len = len;
744 new->dst = dma_dest;
745 new->src = dma_src;
746 new->async_tx.flags = flags;
747 return &new->async_tx;
748 } else
749 return NULL;
750 }
751
752 static void ioat_dma_cleanup_tasklet(unsigned long data)
753 {
754 struct ioat_dma_chan *chan = (void *)data;
755 ioat_dma_memcpy_cleanup(chan);
756 writew(IOAT_CHANCTRL_INT_DISABLE,
757 chan->reg_base + IOAT_CHANCTRL_OFFSET);
758 }
759
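/*
 * ioat_dma_unmap - undo the DMA mappings of a completed descriptor.
 *
 * Clients that manage their own mappings can suppress this by setting
 * DMA_COMPL_SKIP_SRC_UNMAP and/or DMA_COMPL_SKIP_DEST_UNMAP in the flags
 * passed to the prep routine.  A minimal sketch of such a submission
 * (illustrative only, using the dmaengine memcpy prep op):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *			len, DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP);
 */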
760 static void
761 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
762 {
763 /*
764 * yes we are unmapping both _page and _single
765 * alloc'd regions with unmap_page. Is this
766 * *really* that bad?
767 */
768 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
769 pci_unmap_page(ioat_chan->device->pdev,
770 pci_unmap_addr(desc, dst),
771 pci_unmap_len(desc, len),
772 PCI_DMA_FROMDEVICE);
773
774 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
775 pci_unmap_page(ioat_chan->device->pdev,
776 pci_unmap_addr(desc, src),
777 pci_unmap_len(desc, len),
778 PCI_DMA_TODEVICE);
779 }
780
781 /**
782 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
783 * @chan: ioat channel to be cleaned up
784 */
785 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
786 {
787 unsigned long phys_complete;
788 struct ioat_desc_sw *desc, *_desc;
789 dma_cookie_t cookie = 0;
790 unsigned long desc_phys;
791 struct ioat_desc_sw *latest_desc;
792
793 prefetch(ioat_chan->completion_virt);
794
795 if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
796 return;
797
798 /* The completion writeback can happen at any time,
799 so reads by the driver need to be atomic operations.
800 The descriptor physical addresses are limited to 32 bits
801 when the CPU can only do a 32-bit mov. */
802
803 #if (BITS_PER_LONG == 64)
804 phys_complete =
805 ioat_chan->completion_virt->full
806 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
807 #else
808 phys_complete =
809 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
810 #endif
811
812 if ((ioat_chan->completion_virt->full
813 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
814 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
815 dev_err(&ioat_chan->device->pdev->dev,
816 "Channel halted, chanerr = %x\n",
817 readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
818
819 /* TODO do something to salvage the situation */
820 }
821
822 if (phys_complete == ioat_chan->last_completion) {
823 spin_unlock_bh(&ioat_chan->cleanup_lock);
824 return;
825 }
826
827 cookie = 0;
828 spin_lock_bh(&ioat_chan->desc_lock);
829 switch (ioat_chan->device->version) {
830 case IOAT_VER_1_2:
831 list_for_each_entry_safe(desc, _desc,
832 &ioat_chan->used_desc, node) {
833
834 /*
835 * Incoming DMA requests may use multiple descriptors,
836 * due to exceeding xfercap, perhaps. If so, only the
837 * last one will have a cookie, and require unmapping.
838 */
839 if (desc->async_tx.cookie) {
840 cookie = desc->async_tx.cookie;
841 ioat_dma_unmap(ioat_chan, desc);
842 if (desc->async_tx.callback) {
843 desc->async_tx.callback(desc->async_tx.callback_param);
844 desc->async_tx.callback = NULL;
845 }
846 }
847
848 if (desc->async_tx.phys != phys_complete) {
849 /*
850 * a completed entry, but not the last, so clean
851 * up if the client is done with the descriptor
852 */
853 if (async_tx_test_ack(&desc->async_tx)) {
854 list_del(&desc->node);
855 list_add_tail(&desc->node,
856 &ioat_chan->free_desc);
857 } else
858 desc->async_tx.cookie = 0;
859 } else {
860 /*
861 * last used desc. Do not remove, so we can
862 * append from it, but don't look at it next
863 * time, either
864 */
865 desc->async_tx.cookie = 0;
866
867 /* TODO check status bits? */
868 break;
869 }
870 }
871 break;
872 case IOAT_VER_2_0:
873 /* has some other thread already cleaned up? */
874 if (ioat_chan->used_desc.prev == NULL)
875 break;
876
877 /* work backwards to find latest finished desc */
878 desc = to_ioat_desc(ioat_chan->used_desc.next);
879 latest_desc = NULL;
880 do {
881 desc = to_ioat_desc(desc->node.prev);
882 desc_phys = (unsigned long)desc->async_tx.phys
883 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
884 if (desc_phys == phys_complete) {
885 latest_desc = desc;
886 break;
887 }
888 } while (&desc->node != ioat_chan->used_desc.prev);
889
890 if (latest_desc != NULL) {
891
892 /* work forwards to clear finished descriptors */
893 for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
894 &desc->node != latest_desc->node.next &&
895 &desc->node != ioat_chan->used_desc.next;
896 desc = to_ioat_desc(desc->node.next)) {
897 if (desc->async_tx.cookie) {
898 cookie = desc->async_tx.cookie;
899 desc->async_tx.cookie = 0;
900 ioat_dma_unmap(ioat_chan, desc);
901 if (desc->async_tx.callback) {
902 desc->async_tx.callback(desc->async_tx.callback_param);
903 desc->async_tx.callback = NULL;
904 }
905 }
906 }
907
908 /* move used.prev up beyond those that are finished */
909 if (&desc->node == ioat_chan->used_desc.next)
910 ioat_chan->used_desc.prev = NULL;
911 else
912 ioat_chan->used_desc.prev = &desc->node;
913 }
914 break;
915 }
916
917 spin_unlock_bh(&ioat_chan->desc_lock);
918
919 ioat_chan->last_completion = phys_complete;
920 if (cookie != 0)
921 ioat_chan->completed_cookie = cookie;
922
923 spin_unlock_bh(&ioat_chan->cleanup_lock);
924 }
925
926 /**
927 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
928 * @chan: IOAT DMA channel handle
929 * @cookie: DMA transaction identifier
930 * @done: if not %NULL, updated with last completed transaction
931 * @used: if not %NULL, updated with last used transaction
932 */
933 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
934 dma_cookie_t cookie,
935 dma_cookie_t *done,
936 dma_cookie_t *used)
937 {
938 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
939 dma_cookie_t last_used;
940 dma_cookie_t last_complete;
941 enum dma_status ret;
942
943 last_used = chan->cookie;
944 last_complete = ioat_chan->completed_cookie;
945
946 if (done)
947 *done = last_complete;
948 if (used)
949 *used = last_used;
950
951 ret = dma_async_is_complete(cookie, last_complete, last_used);
952 if (ret == DMA_SUCCESS)
953 return ret;
954
955 ioat_dma_memcpy_cleanup(ioat_chan);
956
957 last_used = chan->cookie;
958 last_complete = ioat_chan->completed_cookie;
959
960 if (done)
961 *done = last_complete;
962 if (used)
963 *used = last_used;
964
965 return dma_async_is_complete(cookie, last_complete, last_used);
966 }
967
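/*
 * Prime the channel with a NULL (no-op) descriptor: it transfers no data,
 * but gives the hardware a valid chain address and produces the first
 * completion writeback, after which real descriptors can be appended.
 */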
968 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
969 {
970 struct ioat_desc_sw *desc;
971
972 spin_lock_bh(&ioat_chan->desc_lock);
973
974 desc = ioat_dma_get_next_descriptor(ioat_chan);
975 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
976 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
977 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
978 desc->hw->size = 0;
979 desc->hw->src_addr = 0;
980 desc->hw->dst_addr = 0;
981 async_tx_ack(&desc->async_tx);
982 switch (ioat_chan->device->version) {
983 case IOAT_VER_1_2:
984 desc->hw->next = 0;
985 list_add_tail(&desc->node, &ioat_chan->used_desc);
986
987 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
988 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
989 writel(((u64) desc->async_tx.phys) >> 32,
990 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
991
992 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
993 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
994 break;
995 case IOAT_VER_2_0:
996 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
997 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
998 writel(((u64) desc->async_tx.phys) >> 32,
999 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1000
1001 ioat_chan->dmacount++;
1002 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1003 break;
1004 }
1005 spin_unlock_bh(&ioat_chan->desc_lock);
1006 }
1007
1008 /*
1009 * Perform an IOAT transaction to verify the HW works.
1010 */
1011 #define IOAT_TEST_SIZE 2000
1012
1013 static void ioat_dma_test_callback(void *dma_async_param)
1014 {
1015 printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1016 dma_async_param);
1017 }
1018
1019 /**
1020 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
1021 * @device: device to be tested
1022 */
1023 static int ioat_dma_self_test(struct ioatdma_device *device)
1024 {
1025 int i;
1026 u8 *src;
1027 u8 *dest;
1028 struct dma_chan *dma_chan;
1029 struct dma_async_tx_descriptor *tx;
1030 dma_addr_t dma_dest, dma_src;
1031 dma_cookie_t cookie;
1032 int err = 0;
1033
1034 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1035 if (!src)
1036 return -ENOMEM;
1037 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1038 if (!dest) {
1039 kfree(src);
1040 return -ENOMEM;
1041 }
1042
1043 /* Fill in src buffer */
1044 for (i = 0; i < IOAT_TEST_SIZE; i++)
1045 src[i] = (u8)i;
1046
1047 /* Start copy, using first DMA channel */
1048 dma_chan = container_of(device->common.channels.next,
1049 struct dma_chan,
1050 device_node);
1051 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1052 dev_err(&device->pdev->dev,
1053 "selftest cannot allocate chan resource\n");
1054 err = -ENODEV;
1055 goto out;
1056 }
1057
1058 dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1059 DMA_TO_DEVICE);
1060 dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1061 DMA_FROM_DEVICE);
1062 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1063 IOAT_TEST_SIZE, 0);
1064 if (!tx) {
1065 dev_err(&device->pdev->dev,
1066 "Self-test prep failed, disabling\n");
1067 err = -ENODEV;
1068 goto free_resources;
1069 }
1070
1071 async_tx_ack(tx);
1072 tx->callback = ioat_dma_test_callback;
1073 tx->callback_param = (void *)0x8086;
1074 cookie = tx->tx_submit(tx);
1075 if (cookie < 0) {
1076 dev_err(&device->pdev->dev,
1077 "Self-test setup failed, disabling\n");
1078 err = -ENODEV;
1079 goto free_resources;
1080 }
1081 device->common.device_issue_pending(dma_chan);
1082 msleep(1);
1083
1084 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1085 != DMA_SUCCESS) {
1086 dev_err(&device->pdev->dev,
1087 "Self-test copy timed out, disabling\n");
1088 err = -ENODEV;
1089 goto free_resources;
1090 }
1091 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1092 dev_err(&device->pdev->dev,
1093 "Self-test copy failed compare, disabling\n");
1094 err = -ENODEV;
1095 goto free_resources;
1096 }
1097
1098 free_resources:
1099 device->common.device_free_chan_resources(dma_chan);
1100 out:
1101 kfree(src);
1102 kfree(dest);
1103 return err;
1104 }
1105
1106 static char ioat_interrupt_style[32] = "msix";
1107 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1108 sizeof(ioat_interrupt_style), 0644);
1109 MODULE_PARM_DESC(ioat_interrupt_style,
1110 "set ioat interrupt style: msix (default), "
1111 "msix-single-vector, msi, intx)");
1112
1113 /**
1114 * ioat_dma_setup_interrupts - setup interrupt handler
1115 * @device: ioat device
1116 */
1117 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1118 {
1119 struct ioat_dma_chan *ioat_chan;
1120 int err, i, j, msixcnt;
1121 u8 intrctrl = 0;
1122
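/*
 * Interrupt setup tries the requested style first and then falls back:
 * per-channel MSI-X -> single-vector MSI-X -> MSI -> legacy INTx.  Each
 * later mode is attempted only if the previous one fails.
 */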
1123 if (!strcmp(ioat_interrupt_style, "msix"))
1124 goto msix;
1125 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1126 goto msix_single_vector;
1127 if (!strcmp(ioat_interrupt_style, "msi"))
1128 goto msi;
1129 if (!strcmp(ioat_interrupt_style, "intx"))
1130 goto intx;
1131 dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
1132 ioat_interrupt_style);
1133 goto err_no_irq;
1134
1135 msix:
1136 /* The number of MSI-X vectors should equal the number of channels */
1137 msixcnt = device->common.chancnt;
1138 for (i = 0; i < msixcnt; i++)
1139 device->msix_entries[i].entry = i;
1140
1141 err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
1142 if (err < 0)
1143 goto msi;
1144 if (err > 0)
1145 goto msix_single_vector;
1146
1147 for (i = 0; i < msixcnt; i++) {
1148 ioat_chan = ioat_lookup_chan_by_index(device, i);
1149 err = request_irq(device->msix_entries[i].vector,
1150 ioat_dma_do_interrupt_msix,
1151 0, "ioat-msix", ioat_chan);
1152 if (err) {
1153 for (j = 0; j < i; j++) {
1154 ioat_chan =
1155 ioat_lookup_chan_by_index(device, j);
1156 free_irq(device->msix_entries[j].vector,
1157 ioat_chan);
1158 }
1159 goto msix_single_vector;
1160 }
1161 }
1162 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1163 device->irq_mode = msix_multi_vector;
1164 goto done;
1165
1166 msix_single_vector:
1167 device->msix_entries[0].entry = 0;
1168 err = pci_enable_msix(device->pdev, device->msix_entries, 1);
1169 if (err)
1170 goto msi;
1171
1172 err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
1173 0, "ioat-msix", device);
1174 if (err) {
1175 pci_disable_msix(device->pdev);
1176 goto msi;
1177 }
1178 device->irq_mode = msix_single_vector;
1179 goto done;
1180
1181 msi:
1182 err = pci_enable_msi(device->pdev);
1183 if (err)
1184 goto intx;
1185
1186 err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1187 0, "ioat-msi", device);
1188 if (err) {
1189 pci_disable_msi(device->pdev);
1190 goto intx;
1191 }
1192 /*
1193 * CB 1.2 devices need a bit set in configuration space to enable MSI
1194 */
1195 if (device->version == IOAT_VER_1_2) {
1196 u32 dmactrl;
1197 pci_read_config_dword(device->pdev,
1198 IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1199 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1200 pci_write_config_dword(device->pdev,
1201 IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1202 }
1203 device->irq_mode = msi;
1204 goto done;
1205
1206 intx:
1207 err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1208 IRQF_SHARED, "ioat-intx", device);
1209 if (err)
1210 goto err_no_irq;
1211 device->irq_mode = intx;
1212
1213 done:
1214 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1215 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1216 return 0;
1217
1218 err_no_irq:
1219 /* Disable all interrupt generation */
1220 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1221 dev_err(&device->pdev->dev, "no usable interrupts\n");
1222 device->irq_mode = none;
1223 return -1;
1224 }
1225
1226 /**
1227 * ioat_dma_remove_interrupts - remove whatever interrupts were set
1228 * @device: ioat device
1229 */
1230 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
1231 {
1232 struct ioat_dma_chan *ioat_chan;
1233 int i;
1234
1235 /* Disable all interrupt generation */
1236 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1237
1238 switch (device->irq_mode) {
1239 case msix_multi_vector:
1240 for (i = 0; i < device->common.chancnt; i++) {
1241 ioat_chan = ioat_lookup_chan_by_index(device, i);
1242 free_irq(device->msix_entries[i].vector, ioat_chan);
1243 }
1244 pci_disable_msix(device->pdev);
1245 break;
1246 case msix_single_vector:
1247 free_irq(device->msix_entries[0].vector, device);
1248 pci_disable_msix(device->pdev);
1249 break;
1250 case msi:
1251 free_irq(device->pdev->irq, device);
1252 pci_disable_msi(device->pdev);
1253 break;
1254 case intx:
1255 free_irq(device->pdev->irq, device);
1256 break;
1257 case none:
1258 dev_warn(&device->pdev->dev,
1259 "call to %s without interrupts setup\n", __func__);
1260 }
1261 device->irq_mode = none;
1262 }
1263
1264 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1265 void __iomem *iobase)
1266 {
1267 int err;
1268 struct ioatdma_device *device;
1269
1270 device = kzalloc(sizeof(*device), GFP_KERNEL);
1271 if (!device) {
1272 err = -ENOMEM;
1273 goto err_kzalloc;
1274 }
1275 device->pdev = pdev;
1276 device->reg_base = iobase;
1277 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1278
1279 /* DMA coherent memory pool for DMA descriptor allocations */
1280 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1281 sizeof(struct ioat_dma_descriptor),
1282 64, 0);
1283 if (!device->dma_pool) {
1284 err = -ENOMEM;
1285 goto err_dma_pool;
1286 }
1287
1288 device->completion_pool = pci_pool_create("completion_pool", pdev,
1289 sizeof(u64), SMP_CACHE_BYTES,
1290 SMP_CACHE_BYTES);
1291 if (!device->completion_pool) {
1292 err = -ENOMEM;
1293 goto err_completion_pool;
1294 }
1295
1296 INIT_LIST_HEAD(&device->common.channels);
1297 ioat_dma_enumerate_channels(device);
1298
1299 device->common.device_alloc_chan_resources =
1300 ioat_dma_alloc_chan_resources;
1301 device->common.device_free_chan_resources =
1302 ioat_dma_free_chan_resources;
1303 device->common.dev = &pdev->dev;
1304
1305 dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1306 device->common.device_is_tx_complete = ioat_dma_is_complete;
1307 switch (device->version) {
1308 case IOAT_VER_1_2:
1309 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1310 device->common.device_issue_pending =
1311 ioat1_dma_memcpy_issue_pending;
1312 break;
1313 case IOAT_VER_2_0:
1314 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1315 device->common.device_issue_pending =
1316 ioat2_dma_memcpy_issue_pending;
1317 break;
1318 }
1319
1320 dev_err(&device->pdev->dev,
1321 "Intel(R) I/OAT DMA Engine found,"
1322 " %d channels, device version 0x%02x, driver version %s\n",
1323 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1324
1325 err = ioat_dma_setup_interrupts(device);
1326 if (err)
1327 goto err_setup_interrupts;
1328
1329 err = ioat_dma_self_test(device);
1330 if (err)
1331 goto err_self_test;
1332
1333 dma_async_device_register(&device->common);
1334
1335 return device;
1336
1337 err_self_test:
1338 ioat_dma_remove_interrupts(device);
1339 err_setup_interrupts:
1340 pci_pool_destroy(device->completion_pool);
1341 err_completion_pool:
1342 pci_pool_destroy(device->dma_pool);
1343 err_dma_pool:
1344 kfree(device);
1345 err_kzalloc:
1346 dev_err(&pdev->dev,
1347 "Intel(R) I/OAT DMA Engine initialization failed\n");
1348 return NULL;
1349 }
1350
1351 void ioat_dma_remove(struct ioatdma_device *device)
1352 {
1353 struct dma_chan *chan, *_chan;
1354 struct ioat_dma_chan *ioat_chan;
1355
1356 ioat_dma_remove_interrupts(device);
1357
1358 dma_async_device_unregister(&device->common);
1359
1360 pci_pool_destroy(device->dma_pool);
1361 pci_pool_destroy(device->completion_pool);
1362
1363 iounmap(device->reg_base);
1364 pci_release_regions(device->pdev);
1365 pci_disable_device(device->pdev);
1366
1367 list_for_each_entry_safe(chan, _chan,
1368 &device->common.channels, device_node) {
1369 ioat_chan = to_ioat_chan(chan);
1370 list_del(&chan->device_node);
1371 kfree(ioat_chan);
1372 }
1373 kfree(device);
1374 }
1375