/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
	tasklet_disable(&chan->cleanup_task);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

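/**
 * ioat1_tx_submit - splice a prepared descriptor chain into the hw chain
 * @tx: descriptor to submit
 *
 * Assigns the cookie and links the new chain off the NextDescriptor field
 * of the current tail under desc_lock; the hardware append command is only
 * written once ioat->pending reaches ioat_pending_level.
 */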
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

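/**
 * ioat1_dma_prep_memcpy - build the descriptor chain for a copy operation
 * @c: DMA channel handle
 * @dma_dest: destination bus address
 * @dma_src: source bus address
 * @len: total transfer length
 * @flags: dmaengine submission flags
 *
 * Splits the request into xfercap-sized pieces, one hw descriptor each
 * (for example, a 5MB copy against a 2MB xfercap takes three descriptors:
 * 2MB + 2MB + 1MB); only the final descriptor carries the interrupt-enable
 * and completion-write control bits.
 */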
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

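/* tasklet body: reap completed descriptors, then re-arm channel interrupts */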
static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);

	ioat1_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

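/*
 * Unmap the buffers of a completed copy. For a multi-descriptor transfer
 * the hw addresses point at the final chunk, so subtract 'offset' to
 * recover the start of each buffer.
 */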
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

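/*
 * __cleanup - walk used_desc up to the last completed address: complete
 * cookies, unmap buffers, and move acked descriptors back to free_desc.
 * Callers hold cleanup_lock and desc_lock.
 */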
static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		__func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

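/*
 * ioat1_timer_event - per-channel watchdog: restart the chain once a reset
 * has settled, or, when completions stall, escalate from a manual
 * completion update to a full channel reset.
 */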
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

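/* poll for completion; kick the cleanup path once before re-checking */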
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

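/*
 * Prime an idle channel: issue a NULL descriptor (its size must still be
 * non-zero) to load the chain address and start the engine.
 */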
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 *
 * Tries the requested ioat_interrupt_style, falling back from msix to
 * msix-single-vector to msi to intx as each attempt fails.
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	device->irq_mode = IOAT_MSIX_SINGLE;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

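/*
 * ioat_probe - common initialization: create the descriptor and completion
 * pci pools, enumerate channels, set up interrupts, and run the device
 * self test prior to registration.
 */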
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}