/*
 * Filename: dma.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
        struct list_head list;
        u8 cmd;
        unsigned int laddr;     /* Logical address */
        struct {
                u32 off;
                u32 cnt;
        } sub_page;
        dma_addr_t dma_addr;
        struct page *page;
        unsigned int pg_off;    /* Page Offset */
        rsxx_dma_cb cb;
        void *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT    msecs_to_jiffies(10000)

struct hw_status {
        u8 status;
        u8 tag;
        __le16 count;
        __le32 _rsvd2;
        __le64 _rsvd3;
} __packed;

enum rsxx_dma_status {
        DMA_SW_ERR      = 0x1,
        DMA_HW_FAULT    = 0x2,
        DMA_CANCELLED   = 0x4,
};

struct hw_cmd {
        u8 command;
        u8 tag;
        u8 _rsvd;
        u8 sub_page;    /* Bit[0:2]: 512byte offset */
                        /* Bit[4:6]: 512byte count */
        __le32 device_addr;
        __le64 host_addr;
} __packed;

enum rsxx_hw_cmd {
        HW_CMD_BLK_DISCARD      = 0x70,
        HW_CMD_BLK_WRITE        = 0x80,
        HW_CMD_BLK_READ         = 0xC0,
        HW_CMD_BLK_RECON_READ   = 0xE0,
};

enum rsxx_hw_status {
        HW_STATUS_CRC           = 0x01,
        HW_STATUS_HARD_ERR      = 0x02,
        HW_STATUS_SOFT_ERR      = 0x04,
        HW_STATUS_FAULT         = 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
        int next_tag;
        struct rsxx_dma *dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
                (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
        spinlock_t lock;
        int head;
        struct dma_tracker list[0];
};


/*----------------- Misc Utility Functions -------------------*/
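/*
 * Byte addresses are striped across the card's DMA targets in
 * stripe_size8-byte chunks. rsxx_addr8_to_laddr() collapses a striped
 * card byte address into the per-target logical block address: the bits
 * that selected the target are shifted out, the offset within the
 * stripe is kept, and the result is divided by RSXX_HW_BLK_SIZE to
 * convert bytes into hardware blocks. rsxx_get_dma_tgt() extracts the
 * target index itself. (See rsxx_dma_stripe_setup() below for how the
 * masks and shifts are derived.)
 */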
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
        unsigned long long tgt_addr8;

        tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
                      card->_stripe.upper_mask) |
                    ((addr8) & card->_stripe.lower_mask);
        do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
        return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
        unsigned int tgt;

        tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

        return tgt;
}

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
        if (dma->sub_page.cnt)
                return dma->sub_page.cnt << 9;
        else
                return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
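/*
 * The trackers array doubles as a stack of free command tags: free
 * entries are threaded together through next_tag, with trackers->head
 * holding the first free tag (-1 when none are left). pop_tracker()
 * claims a tag for an in-flight DMA and push_tracker() returns it,
 * clearing the dma pointer so completions for stale tags are caught.
 */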
static void set_tracker_dma(struct dma_tracker_list *trackers,
                            int tag,
                            struct rsxx_dma *dma)
{
        trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
                                        int tag)
{
        return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
        int tag;

        spin_lock(&trackers->lock);
        tag = trackers->head;
        if (tag != -1) {
                trackers->head = trackers->list[tag].next_tag;
                trackers->list[tag].next_tag = -1;
        }
        spin_unlock(&trackers->lock);

        return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
        spin_lock(&trackers->lock);
        trackers->list[tag].next_tag = trackers->head;
        trackers->head = tag;
        trackers->list[tag].dma = NULL;
        spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *      Interrupt Timer (64ns units) [15:0]
 *      Interrupt Count [24:16]
 *      Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK          (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT           16
#define INTR_COAL_COUNT_BITS            9
#define INTR_COAL_COUNT_MASK            (((1 << INTR_COAL_COUNT_BITS) - 1) << \
                                         INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS      64


static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
        u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

        if (mode == RSXX_INTR_COAL_DISABLED)
                return 0;

        return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
                        (latency_units & INTR_COAL_LATENCY_MASK);
}

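/*
 * In auto-tune mode the coalescing count is continually re-targeted to
 * half of the aggregate hardware queue depth, so the card raises an
 * interrupt roughly twice per queue drain; the latency field is passed
 * through unchanged from the card config.
 */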
static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
        int i;
        u32 q_depth = 0;
        u32 intr_coal;

        if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
            unlikely(card->eeh_state))
                return;

        for (i = 0; i < card->n_targets; i++)
                q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      q_depth / 2,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
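/*
 * Final resting place of every DMA, successful or not: update the error
 * counters, unmap the data page if one was mapped, invoke the caller's
 * completion callback (non-zero status on any error), and return the
 * descriptor to the slab pool.
 */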
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma,
                              unsigned int status)
{
        if (status & DMA_SW_ERR)
                ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
                ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;

        if (dma->dma_addr)
                pci_unmap_page(ctrl->card->dev, dma->dma_addr,
                               get_dma_size(dma),
                               dma->cmd == HW_CMD_BLK_WRITE ?
                                           PCI_DMA_TODEVICE :
                                           PCI_DMA_FROMDEVICE);

        if (dma->cb)
                dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

        kmem_cache_free(rsxx_dma_pool, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
                           struct list_head *q)
{
        struct rsxx_dma *dma;
        struct rsxx_dma *tmp;
        int cnt = 0;

        list_for_each_entry_safe(dma, tmp, q, list) {
                list_del(&dma->list);
                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                cnt++;
        }

        return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                             struct rsxx_dma *dma)
{
        /*
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
        spin_lock_bh(&ctrl->queue_lock);
        ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  u8 hw_st)
{
        unsigned int status = 0;
        int requeue_cmd = 0;

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
                dma->cmd, dma->laddr, hw_st);

        if (hw_st & HW_STATUS_CRC)
                ctrl->stats.crc_errors++;
        if (hw_st & HW_STATUS_HARD_ERR)
                ctrl->stats.hard_errors++;
        if (hw_st & HW_STATUS_SOFT_ERR)
                ctrl->stats.soft_errors++;

        switch (dma->cmd) {
        case HW_CMD_BLK_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        if (ctrl->card->scrub_hard) {
                                dma->cmd = HW_CMD_BLK_RECON_READ;
                                requeue_cmd = 1;
                                ctrl->stats.reads_retried++;
                        } else {
                                status |= DMA_HW_FAULT;
                                ctrl->stats.reads_failed++;
                        }
                } else if (hw_st & HW_STATUS_FAULT) {
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_RECON_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        /* Data could not be reconstructed. */
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_WRITE:
                status |= DMA_HW_FAULT;
                ctrl->stats.writes_failed++;

                break;
        case HW_CMD_BLK_DISCARD:
                status |= DMA_HW_FAULT;
                ctrl->stats.discards_failed++;

                break;
        default:
                dev_err(CARD_TO_DEV(ctrl->card),
                        "Unknown command in DMA!(cmd: x%02x "
                        "laddr x%08x st: x%02x)\n",
                        dma->cmd, dma->laddr, hw_st);
                status |= DMA_SW_ERR;

                break;
        }

        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
                rsxx_complete_dma(ctrl, dma, status);
}

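/*
 * Watchdog for a wedged DMA channel, armed whenever commands are
 * outstanding and re-armed on every completion. Two failure modes are
 * distinguished: if the hardware's notion of SW_CMD_IDX disagrees with
 * ours, the doorbell write was presumably lost and is simply re-issued;
 * otherwise the channel is declared dead, the interface is faulted, and
 * every queued and issued DMA is cancelled.
 */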
static void dma_engine_stalled(unsigned long data)
{
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
        int cnt;

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))
                return;

        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
                /*
                 * The dma engine was stalled because the SW_CMD_IDX write
                 * was lost. Issue it again to recover.
                 */
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "SW_CMD_IDX write was lost, re-writing...\n");
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
        } else {
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "DMA channel %d has stalled, faulting interface.\n",
                         ctrl->id);
                ctrl->card->dma_fault = 1;

                /* Clean up the DMA queue */
                spin_lock(&ctrl->queue_lock);
                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
                spin_unlock(&ctrl->queue_lock);

                cnt += rsxx_dma_cancel(ctrl);

                if (cnt)
                        dev_info(CARD_TO_DEV(ctrl->card),
                                 "Freed %d queued DMAs on channel %d\n",
                                 cnt, ctrl->id);
        }
}

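/*
 * Drain the software queue into the hardware command ring: claim a free
 * tag, dequeue the next DMA, and fill the hw_cmd slot at cmd.idx (the
 * sub-page offset and count are packed into one byte per the hw_cmd
 * layout above). The doorbell write to SW_CMD_IDX is deferred until the
 * whole batch has been built, and is skipped entirely if an EEH freeze
 * is detected after the activity timer was armed.
 */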
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int tag;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;

        hw_cmd_buf = ctrl->cmd.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->eeh_state))
                return;

        while (1) {
                spin_lock_bh(&ctrl->queue_lock);
                if (list_empty(&ctrl->queue)) {
                        spin_unlock_bh(&ctrl->queue_lock);
                        break;
                }
                spin_unlock_bh(&ctrl->queue_lock);

                tag = pop_tracker(ctrl->trackers);
                if (tag == -1)
                        break;

                spin_lock_bh(&ctrl->queue_lock);
                dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                list_del(&dma->list);
                ctrl->stats.sw_q_depth--;
                spin_unlock_bh(&ctrl->queue_lock);

                /*
                 * This will catch any DMAs that slipped in right before the
                 * fault, but were queued after all the other DMAs were
                 * cancelled.
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }

                set_tracker_dma(ctrl->trackers, tag, dma);
                hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
                hw_cmd_buf[ctrl->cmd.idx].tag = tag;
                hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
                hw_cmd_buf[ctrl->cmd.idx].sub_page =
                                        ((dma->sub_page.cnt & 0x7) << 4) |
                                         (dma->sub_page.off & 0x7);

                hw_cmd_buf[ctrl->cmd.idx].device_addr =
                                        cpu_to_le32(dma->laddr);

                hw_cmd_buf[ctrl->cmd.idx].host_addr =
                                        cpu_to_le64(dma->dma_addr);

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Issue DMA%d(laddr %d tag %d) to idx %d\n",
                        ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

                ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
                cmds_pending++;

                if (dma->cmd == HW_CMD_BLK_WRITE)
                        ctrl->stats.writes_issued++;
                else if (dma->cmd == HW_CMD_BLK_DISCARD)
                        ctrl->stats.discards_issued++;
                else
                        ctrl->stats.reads_issued++;
        }

        /* Let HW know we've queued commands. */
        if (cmds_pending) {
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (unlikely(ctrl->card->eeh_state)) {
                        del_timer_sync(&ctrl->activity_timer);
                        return;
                }

                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
}

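/*
 * Reap completions from the status ring. The hardware bumps the
 * little-endian count field in each entry as it produces it; the driver
 * keeps a matching expected-event counter (e_cnt), so an entry is valid
 * exactly when its count equals e_cnt. Each completed tag is resolved
 * through the trackers back to its rsxx_dma, which is then either
 * retried/failed via rsxx_handle_dma_error() or completed cleanly.
 */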
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        unsigned long flags;
        u16 count;
        u8 status;
        u8 tag;
        struct hw_status *hw_st_buf;

        hw_st_buf = ctrl->status.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->dma_fault) ||
            unlikely(ctrl->card->eeh_state))
                return;

        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        while (count == ctrl->e_cnt) {
                /*
                 * The read memory-barrier is necessary to keep aggressive
                 * processors/optimizers (such as the PPC Apple G5) from
                 * reordering the following status-buffer tag & status read
                 * *before* the count read on subsequent iterations of the
                 * loop!
                 */
                rmb();

                status = hw_st_buf[ctrl->status.idx].status;
                tag = hw_st_buf[ctrl->status.idx].tag;

                dma = get_tracker_dma(ctrl->trackers, tag);
                if (dma == NULL) {
                        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
                        rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
                        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

                        dev_err(CARD_TO_DEV(ctrl->card),
                                "No tracker for tag %d "
                                "(idx %d id %d)\n",
                                tag, ctrl->status.idx, ctrl->id);
                        return;
                }

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Completing DMA%d"
                        "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
                        ctrl->id, dma->laddr, tag, status, count,
                        ctrl->status.idx);

                atomic_dec(&ctrl->stats.hw_q_depth);

                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
                        rsxx_complete_dma(ctrl, dma, 0);

                push_tracker(ctrl->trackers, tag);

                ctrl->status.idx = (ctrl->status.idx + 1) &
                                   RSXX_CS_IDX_MASK;
                ctrl->e_cnt++;

                count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
        }

        dma_intr_coal_auto_tune(ctrl->card);

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
                del_timer_sync(&ctrl->activity_timer);

        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
        rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

        spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_issue_dmas(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_dma_done(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static int rsxx_queue_discard(struct rsxx_cardinfo *card,
                              struct list_head *q,
                              unsigned int laddr,
                              rsxx_dma_cb cb,
                              void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->cmd = HW_CMD_BLK_DISCARD;
        dma->laddr = laddr;
        dma->dma_addr = 0;
        dma->sub_page.off = 0;
        dma->sub_page.cnt = 0;
        dma->page = NULL;
        dma->pg_off = 0;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

        list_add_tail(&dma->list, q);

        return 0;
}

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
                          struct list_head *q,
                          int dir,
                          unsigned int dma_off,
                          unsigned int dma_len,
                          unsigned int laddr,
                          struct page *page,
                          unsigned int pg_off,
                          rsxx_dma_cb cb,
                          void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
                                     dir ? PCI_DMA_TODEVICE :
                                           PCI_DMA_FROMDEVICE);
        if (!dma->dma_addr) {
                kmem_cache_free(rsxx_dma_pool, dma);
                return -ENOMEM;
        }

        dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr = laddr;
        dma->sub_page.off = (dma_off >> 9);
        dma->sub_page.cnt = (dma_len >> 9);
        dma->page = page;
        dma->pg_off = pg_off;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card),
                "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
                dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
                dma->sub_page.cnt, dma->page, dma->pg_off);

        /* Queue the DMA */
        list_add_tail(&dma->list, q);

        return 0;
}

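/*
 * Carve a bio into per-target DMA descriptors. Discards are issued in
 * whole RSXX_HW_BLK_SIZE blocks; reads and writes walk each bio_vec and
 * split it at hardware-block boundaries, so a single segment may yield
 * several sub-page DMAs. Each descriptor lands on the list of the
 * target its striped address maps to; the lists are then spliced onto
 * the per-channel software queues and the issue workers are kicked.
 */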
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                       struct bio *bio,
                       atomic_t *n_dmas,
                       rsxx_dma_cb cb,
                       void *cb_data)
{
        struct list_head dma_list[RSXX_MAX_TARGETS];
        struct bio_vec *bvec;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
        unsigned int bv_off;
        unsigned int dma_off;
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
        int st;
        int i;

        addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&dma_list[i]);
                dma_cnt[i] = 0;
        }

        if (bio->bi_rw & REQ_DISCARD) {
                bv_len = bio->bi_size;

                while (bv_len > 0) {
                        tgt = rsxx_get_dma_tgt(card, addr8);
                        laddr = rsxx_addr8_to_laddr(addr8, card);

                        st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
                                                cb, cb_data);
                        if (st)
                                goto bvec_err;

                        dma_cnt[tgt]++;
                        atomic_inc(n_dmas);
                        addr8 += RSXX_HW_BLK_SIZE;
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
                bio_for_each_segment(bvec, bio, i) {
                        bv_len = bvec->bv_len;
                        bv_off = bvec->bv_offset;

                        while (bv_len > 0) {
                                tgt = rsxx_get_dma_tgt(card, addr8);
                                laddr = rsxx_addr8_to_laddr(addr8, card);
                                dma_off = addr8 & RSXX_HW_BLK_MASK;
                                dma_len = min(bv_len,
                                              RSXX_HW_BLK_SIZE - dma_off);

                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                    bio_data_dir(bio),
                                                    dma_off, dma_len,
                                                    laddr, bvec->bv_page,
                                                    bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;

                                dma_cnt[tgt]++;
                                atomic_inc(n_dmas);
                                addr8 += dma_len;
                                bv_off += dma_len;
                                bv_len -= dma_len;
                        }
                }
        }

        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
                        spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
                        spin_unlock_bh(&card->ctrl[i].queue_lock);

                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);
                }
        }

        return 0;

bvec_err:
        for (i = 0; i < card->n_targets; i++) {
                spin_lock_bh(&card->ctrl[i].queue_lock);
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
                spin_unlock_bh(&card->ctrl[i].queue_lock);
        }

        return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
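/*
 * Allocate the DMA-coherent status and command rings and hand their bus
 * addresses to the card. The rings are pre-filled with poison bytes
 * (0xac / 0x83), presumably so that entries the hardware has not yet
 * written are easy to spot. The software indices are then seeded from
 * the hardware's current counts and written back, keeping both sides of
 * each ring in agreement before any commands are issued.
 */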
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
        ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
                                                &ctrl->status.dma_addr);
        ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
                                             &ctrl->cmd.dma_addr);
        if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
                return -ENOMEM;

        memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_HI);

        memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

        ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
        if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
                         ctrl->status.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
        iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

        ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
        if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
                         ctrl->cmd.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
        iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

        return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                              struct rsxx_dma_ctrl *ctrl)
{
        int i;
        int st;

        memset(&ctrl->stats, 0, sizeof(ctrl->stats));

        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
        if (!ctrl->trackers)
                return -ENOMEM;

        ctrl->trackers->head = 0;
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                ctrl->trackers->list[i].next_tag = i + 1;
                ctrl->trackers->list[i].dma = NULL;
        }
        ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
        spin_lock_init(&ctrl->trackers->lock);

        spin_lock_init(&ctrl->queue_lock);
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);

        setup_timer(&ctrl->activity_timer, dma_engine_stalled,
                    (unsigned long)ctrl);

        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
        if (!ctrl->issue_wq)
                return -ENOMEM;

        ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
        if (!ctrl->done_wq)
                return -ENOMEM;

        INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
        INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

        st = rsxx_hw_buffers_init(dev, ctrl);
        if (st)
                return st;

        return 0;
}

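/*
 * Derive the stripe masks/shifts used by rsxx_addr8_to_laddr() and
 * rsxx_get_dma_tgt(). As an illustrative example (values assumed, not
 * taken from a real card config): with stripe_size8 = 4096 and
 * n_targets = 4, target_shift = 12 and target_mask = 0x3, so byte
 * address 0x5000 maps to target (0x5000 >> 12) & 0x3 = 1, and its
 * per-target offset is ((0x5000 >> 2) & ~0xfff) | (0x5000 & 0xfff)
 * = 0x1000 before the divide by RSXX_HW_BLK_SIZE.
 */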
static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
                                 unsigned int stripe_size8)
{
        if (!is_power_of_2(stripe_size8)) {
                dev_err(CARD_TO_DEV(card),
                        "stripe_size is NOT a power of 2!\n");
                return -EINVAL;
        }

        card->_stripe.lower_mask = stripe_size8 - 1;

        card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
        card->_stripe.upper_shift = ffs(card->n_targets) - 1;

        card->_stripe.target_mask = card->n_targets - 1;
        card->_stripe.target_shift = ffs(stripe_size8) - 1;

        dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
                card->_stripe.lower_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
                card->_stripe.upper_shift);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
                card->_stripe.upper_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
                card->_stripe.target_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
                card->_stripe.target_shift);

        return 0;
}

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
        u32 intr_coal;

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      card->config.data.intr_coal.count,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);

        return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
        unsigned long flags;
        int st;
        int i;

        dev_info(CARD_TO_DEV(card),
                 "Initializing %d DMA targets\n",
                 card->n_targets);

        /* Regmap is divided up into 4K chunks. One for each DMA channel */
        for (i = 0; i < card->n_targets; i++)
                card->ctrl[i].regmap = card->regmap + (i * 4096);

        card->dma_fault = 0;

        /* Reset the DMA queues */
        rsxx_dma_queue_reset(card);

        /************* Setup DMA Control *************/
        for (i = 0; i < card->n_targets; i++) {
                st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
                if (st)
                        goto failed_dma_setup;

                card->ctrl[i].card = card;
                card->ctrl[i].id = i;
        }

        card->scrub_hard = 1;

        if (card->config_valid)
                rsxx_dma_configure(card);

        /* Enable the interrupts after all setup has completed. */
        for (i = 0; i < card->n_targets; i++) {
                spin_lock_irqsave(&card->irq_lock, flags);
                rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
                spin_unlock_irqrestore(&card->irq_lock, flags);
        }

        return 0;

failed_dma_setup:
        for (i = 0; i < card->n_targets; i++) {
                struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (ctrl->trackers)
                        vfree(ctrl->trackers);

                if (ctrl->status.buf)
                        pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                            ctrl->status.buf,
                                            ctrl->status.dma_addr);
                if (ctrl->cmd.buf)
                        pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                            ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }

        return st;
}

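/*
 * Cancel every DMA the hardware still holds: any tag whose tracker
 * entry is non-NULL is completed with DMA_CANCELLED and its tag is
 * returned to the free stack. Returns the number of DMAs cancelled.
 */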
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int i;
        int cnt = 0;

        /* Clean up issued DMAs */
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                dma = get_tracker_dma(ctrl->trackers, i);
                if (dma) {
                        atomic_dec(&ctrl->stats.hw_q_depth);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        push_tracker(ctrl->trackers, i);
                        cnt++;
                }
        }

        return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
        struct rsxx_dma_ctrl *ctrl;
        int i;

        for (i = 0; i < card->n_targets; i++) {
                ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (timer_pending(&ctrl->activity_timer))
                        del_timer_sync(&ctrl->activity_timer);

                /* Clean up the DMA queue */
                spin_lock_bh(&ctrl->queue_lock);
                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
                spin_unlock_bh(&ctrl->queue_lock);

                rsxx_dma_cancel(ctrl);

                vfree(ctrl->trackers);

                pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                    ctrl->status.buf, ctrl->status.dma_addr);
                pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                    ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }
}

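/*
 * EEH (PCI error recovery) support: when the slot is frozen, pull every
 * issued DMA back off the trackers and splice it onto the front of the
 * software queue, adjusting the issued/queue-depth statistics to match.
 * The data pages are unmapped here because the device may be reset
 * underneath us; rsxx_eeh_remap_dmas() re-establishes the mappings
 * before the queue is re-issued.
 */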
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
        int i;
        int j;
        int cnt;
        struct rsxx_dma *dma;
        struct list_head *issued_dmas;

        issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
                              GFP_KERNEL);
        if (!issued_dmas)
                return -ENOMEM;

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&issued_dmas[i]);
                cnt = 0;
                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
                        dma = get_tracker_dma(card->ctrl[i].trackers, j);
                        if (dma == NULL)
                                continue;

                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                card->ctrl[i].stats.writes_issued--;
                        else if (dma->cmd == HW_CMD_BLK_DISCARD)
                                card->ctrl[i].stats.discards_issued--;
                        else
                                card->ctrl[i].stats.reads_issued--;

                        list_add_tail(&dma->list, &issued_dmas[i]);
                        push_tracker(card->ctrl[i].trackers, j);
                        cnt++;
                }

                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_splice(&issued_dmas[i], &card->ctrl[i].queue);

                atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
                card->ctrl[i].stats.sw_q_depth += cnt;
                card->ctrl[i].e_cnt = 0;

                list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                        if (dma->dma_addr)
                                pci_unmap_page(card->dev, dma->dma_addr,
                                               get_dma_size(dma),
                                               dma->cmd == HW_CMD_BLK_WRITE ?
                                               PCI_DMA_TODEVICE :
                                               PCI_DMA_FROMDEVICE);
                }
                spin_unlock_bh(&card->ctrl[i].queue_lock);
        }

        kfree(issued_dmas);

        return 0;
}

int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
        struct rsxx_dma *dma;
        int i;

        for (i = 0; i < card->n_targets; i++) {
                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                        dma->dma_addr = pci_map_page(card->dev, dma->page,
                                        dma->pg_off, get_dma_size(dma),
                                        dma->cmd == HW_CMD_BLK_WRITE ?
                                        PCI_DMA_TODEVICE :
                                        PCI_DMA_FROMDEVICE);
                        if (!dma->dma_addr) {
                                spin_unlock_bh(&card->ctrl[i].queue_lock);
                                kmem_cache_free(rsxx_dma_pool, dma);
                                return -ENOMEM;
                        }
                }
                spin_unlock_bh(&card->ctrl[i].queue_lock);
        }

        return 0;
}

int rsxx_dma_init(void)
{
        rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
        if (!rsxx_dma_pool)
                return -ENOMEM;

        return 0;
}


void rsxx_dma_cleanup(void)
{
        kmem_cache_destroy(rsxx_dma_pool);
}