drivers/net/wireless/b43/dma.c
1 /*
2
3 Broadcom B43 wireless driver
4
5 DMA ringbuffer and descriptor allocation/management
6
7 Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
8
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
27
28 */
29
30 #include "b43.h"
31 #include "dma.h"
32 #include "main.h"
33 #include "debugfs.h"
34 #include "xmit.h"
35
36 #include <linux/dma-mapping.h>
37 #include <linux/pci.h>
38 #include <linux/delay.h>
39 #include <linux/skbuff.h>
40 #include <linux/etherdevice.h>
41
42
43 /* 32bit DMA ops. */
44 static
45 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
46 int slot,
47 struct b43_dmadesc_meta **meta)
48 {
49 struct b43_dmadesc32 *desc;
50
51 *meta = &(ring->meta[slot]);
52 desc = ring->descbase;
53 desc = &(desc[slot]);
54
55 return (struct b43_dmadesc_generic *)desc;
56 }
57
58 static void op32_fill_descriptor(struct b43_dmaring *ring,
59 struct b43_dmadesc_generic *desc,
60 dma_addr_t dmaaddr, u16 bufsize,
61 int start, int end, int irq)
62 {
63 struct b43_dmadesc32 *descbase = ring->descbase;
64 int slot;
65 u32 ctl;
66 u32 addr;
67 u32 addrext;
68
69 slot = (int)(&(desc->dma32) - descbase);
70 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
71
72 addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
73 addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
74 >> SSB_DMA_TRANSLATION_SHIFT;
75 addr |= ssb_dma_translation(ring->dev->dev);
76 ctl = (bufsize - ring->frameoffset)
77 & B43_DMA32_DCTL_BYTECNT;
78 if (slot == ring->nr_slots - 1)
79 ctl |= B43_DMA32_DCTL_DTABLEEND;
80 if (start)
81 ctl |= B43_DMA32_DCTL_FRAMESTART;
82 if (end)
83 ctl |= B43_DMA32_DCTL_FRAMEEND;
84 if (irq)
85 ctl |= B43_DMA32_DCTL_IRQ;
86 ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
87 & B43_DMA32_DCTL_ADDREXT_MASK;
88
89 desc->dma32.control = cpu_to_le32(ctl);
90 desc->dma32.address = cpu_to_le32(addr);
91 }
92
93 static void op32_poke_tx(struct b43_dmaring *ring, int slot)
94 {
95 b43_dma_write(ring, B43_DMA32_TXINDEX,
96 (u32) (slot * sizeof(struct b43_dmadesc32)));
97 }
98
99 static void op32_tx_suspend(struct b43_dmaring *ring)
100 {
101 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
102 | B43_DMA32_TXSUSPEND);
103 }
104
105 static void op32_tx_resume(struct b43_dmaring *ring)
106 {
107 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
108 & ~B43_DMA32_TXSUSPEND);
109 }
110
111 static int op32_get_current_rxslot(struct b43_dmaring *ring)
112 {
113 u32 val;
114
115 val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
116 val &= B43_DMA32_RXDPTR;
117
118 return (val / sizeof(struct b43_dmadesc32));
119 }
120
121 static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
122 {
123 b43_dma_write(ring, B43_DMA32_RXINDEX,
124 (u32) (slot * sizeof(struct b43_dmadesc32)));
125 }
126
127 static const struct b43_dma_ops dma32_ops = {
128 .idx2desc = op32_idx2desc,
129 .fill_descriptor = op32_fill_descriptor,
130 .poke_tx = op32_poke_tx,
131 .tx_suspend = op32_tx_suspend,
132 .tx_resume = op32_tx_resume,
133 .get_current_rxslot = op32_get_current_rxslot,
134 .set_current_rxslot = op32_set_current_rxslot,
135 };
136
137 /* 64bit DMA ops. */
138 static
139 struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
140 int slot,
141 struct b43_dmadesc_meta **meta)
142 {
143 struct b43_dmadesc64 *desc;
144
145 *meta = &(ring->meta[slot]);
146 desc = ring->descbase;
147 desc = &(desc[slot]);
148
149 return (struct b43_dmadesc_generic *)desc;
150 }
151
152 static void op64_fill_descriptor(struct b43_dmaring *ring,
153 struct b43_dmadesc_generic *desc,
154 dma_addr_t dmaaddr, u16 bufsize,
155 int start, int end, int irq)
156 {
157 struct b43_dmadesc64 *descbase = ring->descbase;
158 int slot;
159 u32 ctl0 = 0, ctl1 = 0;
160 u32 addrlo, addrhi;
161 u32 addrext;
162
163 slot = (int)(&(desc->dma64) - descbase);
164 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
165
166 addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
167 addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
168 addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
169 >> SSB_DMA_TRANSLATION_SHIFT;
170 addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
171 if (slot == ring->nr_slots - 1)
172 ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
173 if (start)
174 ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
175 if (end)
176 ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
177 if (irq)
178 ctl0 |= B43_DMA64_DCTL0_IRQ;
179 ctl1 |= (bufsize - ring->frameoffset)
180 & B43_DMA64_DCTL1_BYTECNT;
181 ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
182 & B43_DMA64_DCTL1_ADDREXT_MASK;
183
184 desc->dma64.control0 = cpu_to_le32(ctl0);
185 desc->dma64.control1 = cpu_to_le32(ctl1);
186 desc->dma64.address_low = cpu_to_le32(addrlo);
187 desc->dma64.address_high = cpu_to_le32(addrhi);
188 }
189
190 static void op64_poke_tx(struct b43_dmaring *ring, int slot)
191 {
192 b43_dma_write(ring, B43_DMA64_TXINDEX,
193 (u32) (slot * sizeof(struct b43_dmadesc64)));
194 }
195
196 static void op64_tx_suspend(struct b43_dmaring *ring)
197 {
198 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
199 | B43_DMA64_TXSUSPEND);
200 }
201
202 static void op64_tx_resume(struct b43_dmaring *ring)
203 {
204 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
205 & ~B43_DMA64_TXSUSPEND);
206 }
207
208 static int op64_get_current_rxslot(struct b43_dmaring *ring)
209 {
210 u32 val;
211
212 val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
213 val &= B43_DMA64_RXSTATDPTR;
214
215 return (val / sizeof(struct b43_dmadesc64));
216 }
217
218 static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
219 {
220 b43_dma_write(ring, B43_DMA64_RXINDEX,
221 (u32) (slot * sizeof(struct b43_dmadesc64)));
222 }
223
224 static const struct b43_dma_ops dma64_ops = {
225 .idx2desc = op64_idx2desc,
226 .fill_descriptor = op64_fill_descriptor,
227 .poke_tx = op64_poke_tx,
228 .tx_suspend = op64_tx_suspend,
229 .tx_resume = op64_tx_resume,
230 .get_current_rxslot = op64_get_current_rxslot,
231 .set_current_rxslot = op64_set_current_rxslot,
232 };
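/* Either ops table implements the same abstract descriptor operations for
 * one of the two engine flavours. b43_setup_dmaring() picks dma64_ops or
 * dma32_ops according to ring->type, so the rest of this file does not
 * need to care about the on-chip descriptor layout. */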
233
234 static inline int free_slots(struct b43_dmaring *ring)
235 {
236 return (ring->nr_slots - ring->used_slots);
237 }
238
239 static inline int next_slot(struct b43_dmaring *ring, int slot)
240 {
241 B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
242 if (slot == ring->nr_slots - 1)
243 return 0;
244 return slot + 1;
245 }
246
247 static inline int prev_slot(struct b43_dmaring *ring, int slot)
248 {
249 B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
250 if (slot == 0)
251 return ring->nr_slots - 1;
252 return slot - 1;
253 }
254
255 #ifdef CONFIG_B43_DEBUG
256 static void update_max_used_slots(struct b43_dmaring *ring,
257 int current_used_slots)
258 {
259 if (current_used_slots <= ring->max_used_slots)
260 return;
261 ring->max_used_slots = current_used_slots;
262 if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
263 b43dbg(ring->dev->wl,
264 "max_used_slots increased to %d on %s ring %d\n",
265 ring->max_used_slots,
266 ring->tx ? "TX" : "RX", ring->index);
267 }
268 }
269 #else
270 static inline
271 void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
272 {
273 }
274 #endif /* DEBUG */
275
276 /* Request a slot for usage. */
277 static inline int request_slot(struct b43_dmaring *ring)
278 {
279 int slot;
280
281 B43_WARN_ON(!ring->tx);
282 B43_WARN_ON(ring->stopped);
283 B43_WARN_ON(free_slots(ring) == 0);
284
285 slot = next_slot(ring, ring->current_slot);
286 ring->current_slot = slot;
287 ring->used_slots++;
288
289 update_max_used_slots(ring, ring->used_slots);
290
291 return slot;
292 }
293
294 /* Mac80211-queue to b43-ring mapping */
295 static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
296 int queue_priority)
297 {
298 struct b43_dmaring *ring;
299
300 /*FIXME: For now we always run on TX-ring-1 */
301 return dev->dma.tx_ring1;
302
303 /* 0 = highest priority */
304 switch (queue_priority) {
305 default:
306 B43_WARN_ON(1);
307 /* fallthrough */
308 case 0:
309 ring = dev->dma.tx_ring3;
310 break;
311 case 1:
312 ring = dev->dma.tx_ring2;
313 break;
314 case 2:
315 ring = dev->dma.tx_ring1;
316 break;
317 case 3:
318 ring = dev->dma.tx_ring0;
319 break;
320 }
321
322 return ring;
323 }
324
325 /* b43-ring to mac80211-queue mapping */
326 static inline int txring_to_priority(struct b43_dmaring *ring)
327 {
328 static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
329 unsigned int index;
330
331 /*FIXME: have only one queue, for now */
332 return 0;
333
334 index = ring->index;
335 if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
336 index = 0;
337 return idx_to_prio[index];
338 }
339
340 static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
341 {
342 static const u16 map64[] = {
343 B43_MMIO_DMA64_BASE0,
344 B43_MMIO_DMA64_BASE1,
345 B43_MMIO_DMA64_BASE2,
346 B43_MMIO_DMA64_BASE3,
347 B43_MMIO_DMA64_BASE4,
348 B43_MMIO_DMA64_BASE5,
349 };
350 static const u16 map32[] = {
351 B43_MMIO_DMA32_BASE0,
352 B43_MMIO_DMA32_BASE1,
353 B43_MMIO_DMA32_BASE2,
354 B43_MMIO_DMA32_BASE3,
355 B43_MMIO_DMA32_BASE4,
356 B43_MMIO_DMA32_BASE5,
357 };
358
359 if (type == B43_DMA_64BIT) {
360 B43_WARN_ON(!(controller_idx >= 0 &&
361 controller_idx < ARRAY_SIZE(map64)));
362 return map64[controller_idx];
363 }
364 B43_WARN_ON(!(controller_idx >= 0 &&
365 controller_idx < ARRAY_SIZE(map32)));
366 return map32[controller_idx];
367 }
368
369 static inline
370 dma_addr_t map_descbuffer(struct b43_dmaring *ring,
371 unsigned char *buf, size_t len, int tx)
372 {
373 dma_addr_t dmaaddr;
374
375 if (tx) {
376 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
377 buf, len, DMA_TO_DEVICE);
378 } else {
379 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
380 buf, len, DMA_FROM_DEVICE);
381 }
382
383 return dmaaddr;
384 }
385
386 static inline
387 void unmap_descbuffer(struct b43_dmaring *ring,
388 dma_addr_t addr, size_t len, int tx)
389 {
390 if (tx) {
391 dma_unmap_single(ring->dev->dev->dma_dev,
392 addr, len, DMA_TO_DEVICE);
393 } else {
394 dma_unmap_single(ring->dev->dev->dma_dev,
395 addr, len, DMA_FROM_DEVICE);
396 }
397 }
398
399 static inline
400 void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
401 dma_addr_t addr, size_t len)
402 {
403 B43_WARN_ON(ring->tx);
404 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
405 addr, len, DMA_FROM_DEVICE);
406 }
407
408 static inline
409 void sync_descbuffer_for_device(struct b43_dmaring *ring,
410 dma_addr_t addr, size_t len)
411 {
412 B43_WARN_ON(ring->tx);
413 dma_sync_single_for_device(ring->dev->dev->dma_dev,
414 addr, len, DMA_FROM_DEVICE);
415 }
416
417 static inline
418 void free_descriptor_buffer(struct b43_dmaring *ring,
419 struct b43_dmadesc_meta *meta)
420 {
421 if (meta->skb) {
422 dev_kfree_skb_any(meta->skb);
423 meta->skb = NULL;
424 }
425 }
426
427 static int alloc_ringmemory(struct b43_dmaring *ring)
428 {
429 struct device *dma_dev = ring->dev->dev->dma_dev;
430 gfp_t flags = GFP_KERNEL;
431
432 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
433 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
434 * has shown that 4K is sufficient for the latter as long as the buffer
435 * does not cross an 8K boundary.
436 *
437 * For unknown reasons - possibly a hardware error - the BCM4311 rev
438 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
439 * which accounts for the GFP_DMA flag below.
440 */
441 if (ring->type == B43_DMA_64BIT)
442 flags |= GFP_DMA;
443 ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
444 &(ring->dmabase), flags);
445 if (!ring->descbase) {
446 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
447 return -ENOMEM;
448 }
449 memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
450
451 return 0;
452 }
453
454 static void free_ringmemory(struct b43_dmaring *ring)
455 {
456 struct device *dma_dev = ring->dev->dev->dma_dev;
457
458 dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
459 ring->descbase, ring->dmabase);
460 }
461
462 /* Reset the RX DMA channel */
463 static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
464 enum b43_dmatype type)
465 {
466 int i;
467 u32 value;
468 u16 offset;
469
470 might_sleep();
471
472 offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
473 b43_write32(dev, mmio_base + offset, 0);
474 for (i = 0; i < 10; i++) {
475 offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
476 B43_DMA32_RXSTATUS;
477 value = b43_read32(dev, mmio_base + offset);
478 if (type == B43_DMA_64BIT) {
479 value &= B43_DMA64_RXSTAT;
480 if (value == B43_DMA64_RXSTAT_DISABLED) {
481 i = -1;
482 break;
483 }
484 } else {
485 value &= B43_DMA32_RXSTATE;
486 if (value == B43_DMA32_RXSTAT_DISABLED) {
487 i = -1;
488 break;
489 }
490 }
491 msleep(1);
492 }
493 if (i != -1) {
494 b43err(dev->wl, "DMA RX reset timed out\n");
495 return -ENODEV;
496 }
497
498 return 0;
499 }
500
501 /* Reset the TX DMA channel */
502 static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
503 enum b43_dmatype type)
504 {
505 int i;
506 u32 value;
507 u16 offset;
508
509 might_sleep();
510
511 for (i = 0; i < 10; i++) {
512 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
513 B43_DMA32_TXSTATUS;
514 value = b43_read32(dev, mmio_base + offset);
515 if (type == B43_DMA_64BIT) {
516 value &= B43_DMA64_TXSTAT;
517 if (value == B43_DMA64_TXSTAT_DISABLED ||
518 value == B43_DMA64_TXSTAT_IDLEWAIT ||
519 value == B43_DMA64_TXSTAT_STOPPED)
520 break;
521 } else {
522 value &= B43_DMA32_TXSTATE;
523 if (value == B43_DMA32_TXSTAT_DISABLED ||
524 value == B43_DMA32_TXSTAT_IDLEWAIT ||
525 value == B43_DMA32_TXSTAT_STOPPED)
526 break;
527 }
528 msleep(1);
529 }
530 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
531 b43_write32(dev, mmio_base + offset, 0);
532 for (i = 0; i < 10; i++) {
533 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
534 B43_DMA32_TXSTATUS;
535 value = b43_read32(dev, mmio_base + offset);
536 if (type == B43_DMA_64BIT) {
537 value &= B43_DMA64_TXSTAT;
538 if (value == B43_DMA64_TXSTAT_DISABLED) {
539 i = -1;
540 break;
541 }
542 } else {
543 value &= B43_DMA32_TXSTATE;
544 if (value == B43_DMA32_TXSTAT_DISABLED) {
545 i = -1;
546 break;
547 }
548 }
549 msleep(1);
550 }
551 if (i != -1) {
552 b43err(dev->wl, "DMA TX reset timed out\n");
553 return -ENODEV;
554 }
555 /* ensure the reset is completed. */
556 msleep(1);
557
558 return 0;
559 }
560
561 /* Check if a DMA mapping address is invalid. */
562 static bool b43_dma_mapping_error(struct b43_dmaring *ring,
563 dma_addr_t addr,
564 size_t buffersize, bool dma_to_device)
565 {
566 if (unlikely(dma_mapping_error(addr)))
567 return 1;
568
569 switch (ring->type) {
570 case B43_DMA_30BIT:
571 if ((u64)addr + buffersize > (1ULL << 30))
572 goto address_error;
573 break;
574 case B43_DMA_32BIT:
575 if ((u64)addr + buffersize > (1ULL << 32))
576 goto address_error;
577 break;
578 case B43_DMA_64BIT:
579 /* Currently we can't have addresses beyond
580 * 64bit in the kernel. */
581 break;
582 }
583
584 /* The address is OK. */
585 return 0;
586
587 address_error:
588 /* We can't support this address. Unmap it again. */
589 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
590
591 return 1;
592 }
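/* Callers that get a mapping error back typically retry the allocation
 * from ZONE_DMA (GFP_DMA) before giving up; see setup_rx_descbuffer()
 * and dma_tx_fragment() below. */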
593
594 static int setup_rx_descbuffer(struct b43_dmaring *ring,
595 struct b43_dmadesc_generic *desc,
596 struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
597 {
598 struct b43_rxhdr_fw4 *rxhdr;
599 struct b43_hwtxstatus *txstat;
600 dma_addr_t dmaaddr;
601 struct sk_buff *skb;
602
603 B43_WARN_ON(ring->tx);
604
605 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
606 if (unlikely(!skb))
607 return -ENOMEM;
608 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
609 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
610 /* ugh. try to realloc in zone_dma */
611 gfp_flags |= GFP_DMA;
612
613 dev_kfree_skb_any(skb);
614
615 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
616 if (unlikely(!skb))
617 return -ENOMEM;
618 dmaaddr = map_descbuffer(ring, skb->data,
619 ring->rx_buffersize, 0);
620 }
621
622 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
623 b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
624 dev_kfree_skb_any(skb);
625 return -EIO;
626 }
627
628 meta->skb = skb;
629 meta->dmaaddr = dmaaddr;
630 ring->ops->fill_descriptor(ring, desc, dmaaddr,
631 ring->rx_buffersize, 0, 0, 0);
632
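	/* Clear the length and cookie fields up front, so that the poll
	 * loops in dma_rx() can tell when the device has actually written
	 * into this freshly mapped buffer. */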
633 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
634 rxhdr->frame_len = 0;
635 txstat = (struct b43_hwtxstatus *)(skb->data);
636 txstat->cookie = 0;
637
638 return 0;
639 }
640
641 /* Allocate the initial descbuffers.
642 * This is used for an RX ring only.
643 */
644 static int alloc_initial_descbuffers(struct b43_dmaring *ring)
645 {
646 int i, err = -ENOMEM;
647 struct b43_dmadesc_generic *desc;
648 struct b43_dmadesc_meta *meta;
649
650 for (i = 0; i < ring->nr_slots; i++) {
651 desc = ring->ops->idx2desc(ring, i, &meta);
652
653 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
654 if (err) {
655 b43err(ring->dev->wl,
656 "Failed to allocate initial descbuffers\n");
657 goto err_unwind;
658 }
659 }
660 mb();
661 ring->used_slots = ring->nr_slots;
662 err = 0;
663 out:
664 return err;
665
666 err_unwind:
667 for (i--; i >= 0; i--) {
668 desc = ring->ops->idx2desc(ring, i, &meta);
669
670 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
671 dev_kfree_skb(meta->skb);
672 }
673 goto out;
674 }
675
676 /* Do initial setup of the DMA controller.
677 * Reset the controller, write the ring's bus address
678 * and switch the "enable" bit on.
679 */
680 static int dmacontroller_setup(struct b43_dmaring *ring)
681 {
682 int err = 0;
683 u32 value;
684 u32 addrext;
685 u32 trans = ssb_dma_translation(ring->dev->dev);
686
687 if (ring->tx) {
688 if (ring->type == B43_DMA_64BIT) {
689 u64 ringbase = (u64) (ring->dmabase);
690
691 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
692 >> SSB_DMA_TRANSLATION_SHIFT;
693 value = B43_DMA64_TXENABLE;
694 value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
695 & B43_DMA64_TXADDREXT_MASK;
696 b43_dma_write(ring, B43_DMA64_TXCTL, value);
697 b43_dma_write(ring, B43_DMA64_TXRINGLO,
698 (ringbase & 0xFFFFFFFF));
699 b43_dma_write(ring, B43_DMA64_TXRINGHI,
700 ((ringbase >> 32) &
701 ~SSB_DMA_TRANSLATION_MASK)
702 | (trans << 1));
703 } else {
704 u32 ringbase = (u32) (ring->dmabase);
705
706 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
707 >> SSB_DMA_TRANSLATION_SHIFT;
708 value = B43_DMA32_TXENABLE;
709 value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
710 & B43_DMA32_TXADDREXT_MASK;
711 b43_dma_write(ring, B43_DMA32_TXCTL, value);
712 b43_dma_write(ring, B43_DMA32_TXRING,
713 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
714 | trans);
715 }
716 } else {
717 err = alloc_initial_descbuffers(ring);
718 if (err)
719 goto out;
720 if (ring->type == B43_DMA_64BIT) {
721 u64 ringbase = (u64) (ring->dmabase);
722
723 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
724 >> SSB_DMA_TRANSLATION_SHIFT;
725 value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
726 value |= B43_DMA64_RXENABLE;
727 value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
728 & B43_DMA64_RXADDREXT_MASK;
729 b43_dma_write(ring, B43_DMA64_RXCTL, value);
730 b43_dma_write(ring, B43_DMA64_RXRINGLO,
731 (ringbase & 0xFFFFFFFF));
732 b43_dma_write(ring, B43_DMA64_RXRINGHI,
733 ((ringbase >> 32) &
734 ~SSB_DMA_TRANSLATION_MASK)
735 | (trans << 1));
736 b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
737 sizeof(struct b43_dmadesc64));
738 } else {
739 u32 ringbase = (u32) (ring->dmabase);
740
741 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
742 >> SSB_DMA_TRANSLATION_SHIFT;
743 value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
744 value |= B43_DMA32_RXENABLE;
745 value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
746 & B43_DMA32_RXADDREXT_MASK;
747 b43_dma_write(ring, B43_DMA32_RXCTL, value);
748 b43_dma_write(ring, B43_DMA32_RXRING,
749 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
750 | trans);
751 b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
752 sizeof(struct b43_dmadesc32));
753 }
754 }
755
756 out:
757 return err;
758 }
759
760 /* Shutdown the DMA controller. */
761 static void dmacontroller_cleanup(struct b43_dmaring *ring)
762 {
763 if (ring->tx) {
764 b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
765 ring->type);
766 if (ring->type == B43_DMA_64BIT) {
767 b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
768 b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
769 } else
770 b43_dma_write(ring, B43_DMA32_TXRING, 0);
771 } else {
772 b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
773 ring->type);
774 if (ring->type == B43_DMA_64BIT) {
775 b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
776 b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
777 } else
778 b43_dma_write(ring, B43_DMA32_RXRING, 0);
779 }
780 }
781
782 static void free_all_descbuffers(struct b43_dmaring *ring)
783 {
784 struct b43_dmadesc_generic *desc;
785 struct b43_dmadesc_meta *meta;
786 int i;
787
788 if (!ring->used_slots)
789 return;
790 for (i = 0; i < ring->nr_slots; i++) {
791 desc = ring->ops->idx2desc(ring, i, &meta);
792
793 if (!meta->skb) {
794 B43_WARN_ON(!ring->tx);
795 continue;
796 }
797 if (ring->tx) {
798 unmap_descbuffer(ring, meta->dmaaddr,
799 meta->skb->len, 1);
800 } else {
801 unmap_descbuffer(ring, meta->dmaaddr,
802 ring->rx_buffersize, 0);
803 }
804 free_descriptor_buffer(ring, meta);
805 }
806 }
807
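/* Probe the widest DMA address width this core supports. The DMA64 flag in
 * SSB_TMSHIGH indicates a 64-bit engine; otherwise we write the address
 * extension bits to the 32-bit TXCTL register and read them back. If they
 * stick, the engine is a 32-bit one, else it only supports 30-bit DMA. */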
808 static u64 supported_dma_mask(struct b43_wldev *dev)
809 {
810 u32 tmp;
811 u16 mmio_base;
812
813 tmp = b43_read32(dev, SSB_TMSHIGH);
814 if (tmp & SSB_TMSHIGH_DMA64)
815 return DMA_64BIT_MASK;
816 mmio_base = b43_dmacontroller_base(0, 0);
817 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
818 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
819 if (tmp & B43_DMA32_TXADDREXT_MASK)
820 return DMA_32BIT_MASK;
821
822 return DMA_30BIT_MASK;
823 }
824
825 /* Main initialization function. */
826 static
827 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
828 int controller_index,
829 int for_tx,
830 enum b43_dmatype type)
831 {
832 struct b43_dmaring *ring;
833 int err;
834 int nr_slots;
835 dma_addr_t dma_test;
836
837 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
838 if (!ring)
839 goto out;
840 ring->type = type;
841
842 nr_slots = B43_RXRING_SLOTS;
843 if (for_tx)
844 nr_slots = B43_TXRING_SLOTS;
845
846 ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
847 GFP_KERNEL);
848 if (!ring->meta)
849 goto err_kfree_ring;
850 if (for_tx) {
851 ring->txhdr_cache = kcalloc(nr_slots,
852 b43_txhdr_size(dev),
853 GFP_KERNEL);
854 if (!ring->txhdr_cache)
855 goto err_kfree_meta;
856
857 /* test for ability to dma to txhdr_cache */
858 dma_test = dma_map_single(dev->dev->dma_dev,
859 ring->txhdr_cache,
860 b43_txhdr_size(dev),
861 DMA_TO_DEVICE);
862
863 if (b43_dma_mapping_error(ring, dma_test,
864 b43_txhdr_size(dev), 1)) {
865 /* ugh realloc */
866 kfree(ring->txhdr_cache);
867 ring->txhdr_cache = kcalloc(nr_slots,
868 b43_txhdr_size(dev),
869 GFP_KERNEL | GFP_DMA);
870 if (!ring->txhdr_cache)
871 goto err_kfree_meta;
872
873 dma_test = dma_map_single(dev->dev->dma_dev,
874 ring->txhdr_cache,
875 b43_txhdr_size(dev),
876 DMA_TO_DEVICE);
877
878 if (b43_dma_mapping_error(ring, dma_test,
879 b43_txhdr_size(dev), 1)) {
880
881 b43err(dev->wl,
882 "TXHDR DMA allocation failed\n");
883 goto err_kfree_txhdr_cache;
884 }
885 }
886
887 dma_unmap_single(dev->dev->dma_dev,
888 dma_test, b43_txhdr_size(dev),
889 DMA_TO_DEVICE);
890 }
891
892 ring->dev = dev;
893 ring->nr_slots = nr_slots;
894 ring->mmio_base = b43_dmacontroller_base(type, controller_index);
895 ring->index = controller_index;
896 if (type == B43_DMA_64BIT)
897 ring->ops = &dma64_ops;
898 else
899 ring->ops = &dma32_ops;
900 if (for_tx) {
901 ring->tx = 1;
902 ring->current_slot = -1;
903 } else {
904 if (ring->index == 0) {
905 ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
906 ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
907 } else if (ring->index == 3) {
908 ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
909 ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
910 } else
911 B43_WARN_ON(1);
912 }
913 spin_lock_init(&ring->lock);
914 #ifdef CONFIG_B43_DEBUG
915 ring->last_injected_overflow = jiffies;
916 #endif
917
918 err = alloc_ringmemory(ring);
919 if (err)
920 goto err_kfree_txhdr_cache;
921 err = dmacontroller_setup(ring);
922 if (err)
923 goto err_free_ringmemory;
924
925 out:
926 return ring;
927
928 err_free_ringmemory:
929 free_ringmemory(ring);
930 err_kfree_txhdr_cache:
931 kfree(ring->txhdr_cache);
932 err_kfree_meta:
933 kfree(ring->meta);
934 err_kfree_ring:
935 kfree(ring);
936 ring = NULL;
937 goto out;
938 }
939
940 /* Main cleanup function. */
941 static void b43_destroy_dmaring(struct b43_dmaring *ring)
942 {
943 if (!ring)
944 return;
945
946 b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
947 (unsigned int)(ring->type),
948 ring->mmio_base,
949 (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
950 /* Device IRQs are disabled prior to entering this function,
951 * so there is no need to worry about concurrency with the RX handler.
952 */
953 dmacontroller_cleanup(ring);
954 free_all_descbuffers(ring);
955 free_ringmemory(ring);
956
957 kfree(ring->txhdr_cache);
958 kfree(ring->meta);
959 kfree(ring);
960 }
961
962 void b43_dma_free(struct b43_wldev *dev)
963 {
964 struct b43_dma *dma = &dev->dma;
965
966 b43_destroy_dmaring(dma->rx_ring3);
967 dma->rx_ring3 = NULL;
968 b43_destroy_dmaring(dma->rx_ring0);
969 dma->rx_ring0 = NULL;
970
971 b43_destroy_dmaring(dma->tx_ring5);
972 dma->tx_ring5 = NULL;
973 b43_destroy_dmaring(dma->tx_ring4);
974 dma->tx_ring4 = NULL;
975 b43_destroy_dmaring(dma->tx_ring3);
976 dma->tx_ring3 = NULL;
977 b43_destroy_dmaring(dma->tx_ring2);
978 dma->tx_ring2 = NULL;
979 b43_destroy_dmaring(dma->tx_ring1);
980 dma->tx_ring1 = NULL;
981 b43_destroy_dmaring(dma->tx_ring0);
982 dma->tx_ring0 = NULL;
983 }
984
985 int b43_dma_init(struct b43_wldev *dev)
986 {
987 struct b43_dma *dma = &dev->dma;
988 struct b43_dmaring *ring;
989 int err;
990 u64 dmamask;
991 enum b43_dmatype type;
992
993 dmamask = supported_dma_mask(dev);
994 switch (dmamask) {
995 default:
996 B43_WARN_ON(1);
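		/* Fall through: treat an unexpected mask as 30-bit DMA. */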
997 case DMA_30BIT_MASK:
998 type = B43_DMA_30BIT;
999 break;
1000 case DMA_32BIT_MASK:
1001 type = B43_DMA_32BIT;
1002 break;
1003 case DMA_64BIT_MASK:
1004 type = B43_DMA_64BIT;
1005 break;
1006 }
1007 err = ssb_dma_set_mask(dev->dev, dmamask);
1008 if (err) {
1009 b43err(dev->wl, "The machine/kernel does not support "
1010 "the required DMA mask (0x%08X%08X)\n",
1011 (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
1012 (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
1013 return -EOPNOTSUPP;
1014 }
1015
1016 err = -ENOMEM;
1017 /* setup TX DMA channels. */
1018 ring = b43_setup_dmaring(dev, 0, 1, type);
1019 if (!ring)
1020 goto out;
1021 dma->tx_ring0 = ring;
1022
1023 ring = b43_setup_dmaring(dev, 1, 1, type);
1024 if (!ring)
1025 goto err_destroy_tx0;
1026 dma->tx_ring1 = ring;
1027
1028 ring = b43_setup_dmaring(dev, 2, 1, type);
1029 if (!ring)
1030 goto err_destroy_tx1;
1031 dma->tx_ring2 = ring;
1032
1033 ring = b43_setup_dmaring(dev, 3, 1, type);
1034 if (!ring)
1035 goto err_destroy_tx2;
1036 dma->tx_ring3 = ring;
1037
1038 ring = b43_setup_dmaring(dev, 4, 1, type);
1039 if (!ring)
1040 goto err_destroy_tx3;
1041 dma->tx_ring4 = ring;
1042
1043 ring = b43_setup_dmaring(dev, 5, 1, type);
1044 if (!ring)
1045 goto err_destroy_tx4;
1046 dma->tx_ring5 = ring;
1047
1048 /* setup RX DMA channels. */
1049 ring = b43_setup_dmaring(dev, 0, 0, type);
1050 if (!ring)
1051 goto err_destroy_tx5;
1052 dma->rx_ring0 = ring;
1053
1054 if (dev->dev->id.revision < 5) {
1055 ring = b43_setup_dmaring(dev, 3, 0, type);
1056 if (!ring)
1057 goto err_destroy_rx0;
1058 dma->rx_ring3 = ring;
1059 }
1060
1061 b43dbg(dev->wl, "%u-bit DMA initialized\n",
1062 (unsigned int)type);
1063 err = 0;
1064 out:
1065 return err;
1066
1067 err_destroy_rx0:
1068 b43_destroy_dmaring(dma->rx_ring0);
1069 dma->rx_ring0 = NULL;
1070 err_destroy_tx5:
1071 b43_destroy_dmaring(dma->tx_ring5);
1072 dma->tx_ring5 = NULL;
1073 err_destroy_tx4:
1074 b43_destroy_dmaring(dma->tx_ring4);
1075 dma->tx_ring4 = NULL;
1076 err_destroy_tx3:
1077 b43_destroy_dmaring(dma->tx_ring3);
1078 dma->tx_ring3 = NULL;
1079 err_destroy_tx2:
1080 b43_destroy_dmaring(dma->tx_ring2);
1081 dma->tx_ring2 = NULL;
1082 err_destroy_tx1:
1083 b43_destroy_dmaring(dma->tx_ring1);
1084 dma->tx_ring1 = NULL;
1085 err_destroy_tx0:
1086 b43_destroy_dmaring(dma->tx_ring0);
1087 dma->tx_ring0 = NULL;
1088 goto out;
1089 }
1090
1091 /* Generate a cookie for the TX header. */
1092 static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1093 {
1094 u16 cookie = 0x1000;
1095
1096 /* Use the upper 4 bits of the cookie as
1097 * the DMA controller ID and store the slot number
1098 * in the lower 12 bits.
1099 * Note that the cookie must never be 0, as this
1100 * is a special value used in the RX path.
1101 * It must also not be 0xFFFF, because that value is
1102 * reserved for multicast frames.
1103 */
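	/* Worked example (illustrative): ring index 2, slot 5 encodes as
	 * cookie 0x3005; parse_cookie() below maps 0x3000 back to tx_ring2
	 * and masks out slot 5. */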
1104 switch (ring->index) {
1105 case 0:
1106 cookie = 0x1000;
1107 break;
1108 case 1:
1109 cookie = 0x2000;
1110 break;
1111 case 2:
1112 cookie = 0x3000;
1113 break;
1114 case 3:
1115 cookie = 0x4000;
1116 break;
1117 case 4:
1118 cookie = 0x5000;
1119 break;
1120 case 5:
1121 cookie = 0x6000;
1122 break;
1123 default:
1124 B43_WARN_ON(1);
1125 }
1126 B43_WARN_ON(slot & ~0x0FFF);
1127 cookie |= (u16) slot;
1128
1129 return cookie;
1130 }
1131
1132 /* Inspect a cookie and find out to which controller/slot it belongs. */
1133 static
1134 struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1135 {
1136 struct b43_dma *dma = &dev->dma;
1137 struct b43_dmaring *ring = NULL;
1138
1139 switch (cookie & 0xF000) {
1140 case 0x1000:
1141 ring = dma->tx_ring0;
1142 break;
1143 case 0x2000:
1144 ring = dma->tx_ring1;
1145 break;
1146 case 0x3000:
1147 ring = dma->tx_ring2;
1148 break;
1149 case 0x4000:
1150 ring = dma->tx_ring3;
1151 break;
1152 case 0x5000:
1153 ring = dma->tx_ring4;
1154 break;
1155 case 0x6000:
1156 ring = dma->tx_ring5;
1157 break;
1158 default:
1159 B43_WARN_ON(1);
1160 }
1161 *slot = (cookie & 0x0FFF);
1162 B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
1163
1164 return ring;
1165 }
1166
1167 static int dma_tx_fragment(struct b43_dmaring *ring,
1168 struct sk_buff *skb,
1169 struct ieee80211_tx_control *ctl)
1170 {
1171 const struct b43_dma_ops *ops = ring->ops;
1172 u8 *header;
1173 int slot, old_top_slot, old_used_slots;
1174 int err;
1175 struct b43_dmadesc_generic *desc;
1176 struct b43_dmadesc_meta *meta;
1177 struct b43_dmadesc_meta *meta_hdr;
1178 struct sk_buff *bounce_skb;
1179 u16 cookie;
1180 size_t hdrsize = b43_txhdr_size(ring->dev);
1181
1182 #define SLOTS_PER_PACKET 2
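	/* Each frame occupies two ring slots: one descriptor for the b43 TX
	 * header (taken from txhdr_cache) and one for the payload skb. */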
1183 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
1184
1185 old_top_slot = ring->current_slot;
1186 old_used_slots = ring->used_slots;
1187
1188 /* Get a slot for the header. */
1189 slot = request_slot(ring);
1190 desc = ops->idx2desc(ring, slot, &meta_hdr);
1191 memset(meta_hdr, 0, sizeof(*meta_hdr));
1192
1193 header = &(ring->txhdr_cache[slot * hdrsize]);
1194 cookie = generate_cookie(ring, slot);
1195 err = b43_generate_txhdr(ring->dev, header,
1196 skb->data, skb->len, ctl, cookie);
1197 if (unlikely(err)) {
1198 ring->current_slot = old_top_slot;
1199 ring->used_slots = old_used_slots;
1200 return err;
1201 }
1202
1203 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1204 hdrsize, 1);
1205 if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
1206 ring->current_slot = old_top_slot;
1207 ring->used_slots = old_used_slots;
1208 return -EIO;
1209 }
1210 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1211 hdrsize, 1, 0, 0);
1212
1213 /* Get a slot for the payload. */
1214 slot = request_slot(ring);
1215 desc = ops->idx2desc(ring, slot, &meta);
1216 memset(meta, 0, sizeof(*meta));
1217
1218 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1219 meta->skb = skb;
1220 meta->is_last_fragment = 1;
1221
1222 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1223 /* create a bounce buffer in zone_dma on mapping failure. */
1224 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1225 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1226 if (!bounce_skb) {
1227 ring->current_slot = old_top_slot;
1228 ring->used_slots = old_used_slots;
1229 err = -ENOMEM;
1230 goto out_unmap_hdr;
1231 }
1232
1233 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1234 dev_kfree_skb_any(skb);
1235 skb = bounce_skb;
1236 meta->skb = skb;
1237 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1238 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1239 ring->current_slot = old_top_slot;
1240 ring->used_slots = old_used_slots;
1241 err = -EIO;
1242 goto out_free_bounce;
1243 }
1244 }
1245
1246 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1247
1248 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
1249 /* Tell the firmware about the cookie of the last
1250 * mcast frame, so it can clear the more-data bit in it. */
1251 b43_shm_write16(ring->dev, B43_SHM_SHARED,
1252 B43_SHM_SH_MCASTCOOKIE, cookie);
1253 }
1254 /* Now transfer the whole frame. */
1255 wmb();
1256 ops->poke_tx(ring, next_slot(ring, slot));
1257 return 0;
1258
1259 out_free_bounce:
1260 dev_kfree_skb_any(skb);
1261 out_unmap_hdr:
1262 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1263 hdrsize, 1);
1264 return err;
1265 }
1266
1267 static inline int should_inject_overflow(struct b43_dmaring *ring)
1268 {
1269 #ifdef CONFIG_B43_DEBUG
1270 if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
1271 /* Check if we should inject another ringbuffer overflow
1272 * to test handling of this situation in the stack. */
1273 unsigned long next_overflow;
1274
1275 next_overflow = ring->last_injected_overflow + HZ;
1276 if (time_after(jiffies, next_overflow)) {
1277 ring->last_injected_overflow = jiffies;
1278 b43dbg(ring->dev->wl,
1279 "Injecting TX ring overflow on "
1280 "DMA controller %d\n", ring->index);
1281 return 1;
1282 }
1283 }
1284 #endif /* CONFIG_B43_DEBUG */
1285 return 0;
1286 }
1287
1288 int b43_dma_tx(struct b43_wldev *dev,
1289 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
1290 {
1291 struct b43_dmaring *ring;
1292 struct ieee80211_hdr *hdr;
1293 int err = 0;
1294 unsigned long flags;
1295
1296 if (unlikely(skb->len < 2 + 2 + 6)) {
1297 /* Too short: can't even hold frame control, duration and addr1. */
1298 return -EINVAL;
1299 }
1300
1301 hdr = (struct ieee80211_hdr *)skb->data;
1302 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
1303 /* The multicast ring will be sent after the DTIM */
1304 ring = dev->dma.tx_ring4;
1305 /* Set the more-data bit. Ucode will clear it on
1306 * the last frame for us. */
1307 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1308 } else {
1309 /* Decide by priority where to put this frame. */
1310 ring = priority_to_txring(dev, ctl->queue);
1311 }
1312
1313 spin_lock_irqsave(&ring->lock, flags);
1314 B43_WARN_ON(!ring->tx);
1315 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
1316 b43warn(dev->wl, "DMA queue overflow\n");
1317 err = -ENOSPC;
1318 goto out_unlock;
1319 }
1320 /* Check if the queue was stopped in mac80211,
1321 * but we got called nevertheless.
1322 * That would be a mac80211 bug. */
1323 B43_WARN_ON(ring->stopped);
1324
1325 err = dma_tx_fragment(ring, skb, ctl);
1326 if (unlikely(err == -ENOKEY)) {
1327 /* Drop this packet, as we don't have the encryption key
1328 * anymore and must not transmit it unencrypted. */
1329 dev_kfree_skb_any(skb);
1330 err = 0;
1331 goto out_unlock;
1332 }
1333 if (unlikely(err)) {
1334 b43err(dev->wl, "DMA tx mapping failure\n");
1335 goto out_unlock;
1336 }
1337 ring->nr_tx_packets++;
1338 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1339 should_inject_overflow(ring)) {
1340 /* This TX ring is full. */
1341 ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
1342 ring->stopped = 1;
1343 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1344 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
1345 }
1346 }
1347 out_unlock:
1348 spin_unlock_irqrestore(&ring->lock, flags);
1349
1350 return err;
1351 }
1352
1353 /* Called with IRQs disabled. */
1354 void b43_dma_handle_txstatus(struct b43_wldev *dev,
1355 const struct b43_txstatus *status)
1356 {
1357 const struct b43_dma_ops *ops;
1358 struct b43_dmaring *ring;
1359 struct b43_dmadesc_generic *desc;
1360 struct b43_dmadesc_meta *meta;
1361 int slot;
1362
1363 ring = parse_cookie(dev, status->cookie, &slot);
1364 if (unlikely(!ring))
1365 return;
1366
1367 spin_lock(&ring->lock); /* IRQs are already disabled. */
1368
1369 B43_WARN_ON(!ring->tx);
1370 ops = ring->ops;
1371 while (1) {
1372 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1373 desc = ops->idx2desc(ring, slot, &meta);
1374
1375 if (meta->skb)
1376 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
1377 1);
1378 else
1379 unmap_descbuffer(ring, meta->dmaaddr,
1380 b43_txhdr_size(dev), 1);
1381
1382 if (meta->is_last_fragment) {
1383 B43_WARN_ON(!meta->skb);
1384 /* Call back to inform the ieee80211 subsystem about the
1385 * status of the transmission.
1386 * Some fields of txstat are already filled in dma_tx().
1387 */
1388 if (status->acked) {
1389 meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
1390 } else {
1391 if (!(meta->txstat.control.flags
1392 & IEEE80211_TXCTL_NO_ACK))
1393 meta->txstat.excessive_retries = 1;
1394 }
1395 if (status->frame_count == 0) {
1396 /* The frame was not transmitted at all. */
1397 meta->txstat.retry_count = 0;
1398 } else
1399 meta->txstat.retry_count = status->frame_count - 1;
1400 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
1401 &(meta->txstat));
1402 /* skb is freed by ieee80211_tx_status_irqsafe() */
1403 meta->skb = NULL;
1404 } else {
1405 /* No need to call free_descriptor_buffer here, as
1406 * this is only the txhdr, which lives in the preallocated txhdr_cache.
1407 */
1408 B43_WARN_ON(meta->skb);
1409 }
1410
1411 /* Everything unmapped and free'd. So it's not used anymore. */
1412 ring->used_slots--;
1413
1414 if (meta->is_last_fragment)
1415 break;
1416 slot = next_slot(ring, slot);
1417 }
1418 dev->stats.last_tx = jiffies;
1419 if (ring->stopped) {
1420 B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1421 ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
1422 ring->stopped = 0;
1423 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1424 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
1425 }
1426 }
1427
1428 spin_unlock(&ring->lock);
1429 }
1430
1431 void b43_dma_get_tx_stats(struct b43_wldev *dev,
1432 struct ieee80211_tx_queue_stats *stats)
1433 {
1434 const int nr_queues = dev->wl->hw->queues;
1435 struct b43_dmaring *ring;
1436 struct ieee80211_tx_queue_stats_data *data;
1437 unsigned long flags;
1438 int i;
1439
1440 for (i = 0; i < nr_queues; i++) {
1441 data = &(stats->data[i]);
1442 ring = priority_to_txring(dev, i);
1443
1444 spin_lock_irqsave(&ring->lock, flags);
1445 data->len = ring->used_slots / SLOTS_PER_PACKET;
1446 data->limit = ring->nr_slots / SLOTS_PER_PACKET;
1447 data->count = ring->nr_tx_packets;
1448 spin_unlock_irqrestore(&ring->lock, flags);
1449 }
1450 }
1451
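/* Process one received buffer. Ring 3 carries hardware TX status reports,
 * which are forwarded to b43_handle_hwtxstatus(); the other RX ring carries
 * frames prefixed by a struct b43_rxhdr_fw4. The descriptor buffer is
 * replaced (or recycled on error) before the skb is passed up the stack. */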
1452 static void dma_rx(struct b43_dmaring *ring, int *slot)
1453 {
1454 const struct b43_dma_ops *ops = ring->ops;
1455 struct b43_dmadesc_generic *desc;
1456 struct b43_dmadesc_meta *meta;
1457 struct b43_rxhdr_fw4 *rxhdr;
1458 struct sk_buff *skb;
1459 u16 len;
1460 int err;
1461 dma_addr_t dmaaddr;
1462
1463 desc = ops->idx2desc(ring, *slot, &meta);
1464
1465 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1466 skb = meta->skb;
1467
1468 if (ring->index == 3) {
1469 /* We received an xmit status. */
1470 struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
1471 int i = 0;
1472
1473 while (hw->cookie == 0) {
1474 if (i > 100)
1475 break;
1476 i++;
1477 udelay(2);
1478 barrier();
1479 }
1480 b43_handle_hwtxstatus(ring->dev, hw);
1481 /* recycle the descriptor buffer. */
1482 sync_descbuffer_for_device(ring, meta->dmaaddr,
1483 ring->rx_buffersize);
1484
1485 return;
1486 }
1487 rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
1488 len = le16_to_cpu(rxhdr->frame_len);
1489 if (len == 0) {
1490 int i = 0;
1491
1492 do {
1493 udelay(2);
1494 barrier();
1495 len = le16_to_cpu(rxhdr->frame_len);
1496 } while (len == 0 && i++ < 5);
1497 if (unlikely(len == 0)) {
1498 /* recycle the descriptor buffer. */
1499 sync_descbuffer_for_device(ring, meta->dmaaddr,
1500 ring->rx_buffersize);
1501 goto drop;
1502 }
1503 }
1504 if (unlikely(len > ring->rx_buffersize)) {
1505 /* The data did not fit into one descriptor buffer
1506 * and is split over multiple buffers.
1507 * This should never happen, as we try to allocate buffers
1508 * big enough. So simply ignore this packet.
1509 */
1510 int cnt = 0;
1511 s32 tmp = len;
1512
1513 while (1) {
1514 desc = ops->idx2desc(ring, *slot, &meta);
1515 /* recycle the descriptor buffer. */
1516 sync_descbuffer_for_device(ring, meta->dmaaddr,
1517 ring->rx_buffersize);
1518 *slot = next_slot(ring, *slot);
1519 cnt++;
1520 tmp -= ring->rx_buffersize;
1521 if (tmp <= 0)
1522 break;
1523 }
1524 b43err(ring->dev->wl, "DMA RX buffer too small "
1525 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1526 len, ring->rx_buffersize, cnt);
1527 goto drop;
1528 }
1529
1530 dmaaddr = meta->dmaaddr;
1531 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1532 if (unlikely(err)) {
1533 b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
1534 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1535 goto drop;
1536 }
1537
1538 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1539 skb_put(skb, len + ring->frameoffset);
1540 skb_pull(skb, ring->frameoffset);
1541
1542 b43_rx(ring->dev, skb, rxhdr);
1543 drop:
1544 return;
1545 }
1546
1547 void b43_dma_rx(struct b43_dmaring *ring)
1548 {
1549 const struct b43_dma_ops *ops = ring->ops;
1550 int slot, current_slot;
1551 int used_slots = 0;
1552
1553 B43_WARN_ON(ring->tx);
1554 current_slot = ops->get_current_rxslot(ring);
1555 B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
1556
1557 slot = ring->current_slot;
1558 for (; slot != current_slot; slot = next_slot(ring, slot)) {
1559 dma_rx(ring, &slot);
1560 update_max_used_slots(ring, ++used_slots);
1561 }
1562 ops->set_current_rxslot(ring, slot);
1563 ring->current_slot = slot;
1564 }
1565
1566 static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
1567 {
1568 unsigned long flags;
1569
1570 spin_lock_irqsave(&ring->lock, flags);
1571 B43_WARN_ON(!ring->tx);
1572 ring->ops->tx_suspend(ring);
1573 spin_unlock_irqrestore(&ring->lock, flags);
1574 }
1575
1576 static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
1577 {
1578 unsigned long flags;
1579
1580 spin_lock_irqsave(&ring->lock, flags);
1581 B43_WARN_ON(!ring->tx);
1582 ring->ops->tx_resume(ring);
1583 spin_unlock_irqrestore(&ring->lock, flags);
1584 }
1585
1586 void b43_dma_tx_suspend(struct b43_wldev *dev)
1587 {
1588 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
1589 b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
1590 b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
1591 b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
1592 b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
1593 b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
1594 b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
1595 }
1596
1597 void b43_dma_tx_resume(struct b43_wldev *dev)
1598 {
1599 b43_dma_tx_resume_ring(dev->dma.tx_ring5);
1600 b43_dma_tx_resume_ring(dev->dma.tx_ring4);
1601 b43_dma_tx_resume_ring(dev->dma.tx_ring3);
1602 b43_dma_tx_resume_ring(dev->dma.tx_ring2);
1603 b43_dma_tx_resume_ring(dev->dma.tx_ring1);
1604 b43_dma_tx_resume_ring(dev->dma.tx_ring0);
1605 b43_power_saving_ctl_bits(dev, 0);
1606 }