/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
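/*
 * Illustrative worked example (not from the original source): the SSB DMA
 * engine addresses memory through a translation window selected by the top
 * address bits. Assuming the usual SSB_DMA_TRANSLATION_MASK of 0xC0000000
 * with SSB_DMA_TRANSLATION_SHIFT 30, a buffer mapped at dmaaddr 0x8F001000
 * is split by the code above as
 *
 *	addr    = 0x8F001000 & ~0xC0000000        = 0x0F001000
 *	addrext = (0x8F001000 & 0xC0000000) >> 30 = 2
 *
 * and the core's translation base is then OR'ed back into addr.
 */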
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
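/*
 * The rest of the driver never touches the 32-bit or 64-bit descriptor
 * formats directly; it dispatches through ring->ops, e.g.
 * ring->ops->poke_tx(ring, slot), so every code path below works
 * unchanged with either engine type.
 */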
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
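/*
 * Illustrative note: for the 64-bit engine the address extension is carved
 * out of the *high* word of the bus address, so (again assuming the
 * 0xC0000000/30 translation mask and shift) the split above operates on
 * (dmaaddr >> 32) rather than on the low 32 bits as in the 32-bit case.
 */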
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}
static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
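/*
 * Illustrative example of the ring arithmetic: on, say, a 256-slot TX
 * ring, next_slot(ring, 255) wraps to 0 and prev_slot(ring, 0) wraps to
 * 255; slot -1 is the "no descriptor used yet" start value of
 * ring->current_slot, which is why next_slot() accepts it.
 */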
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
					      int queue_priority)
{
	struct b43_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	}

	return ring;
}
/* b43-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
	static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
	unsigned int index;

/*FIXME: have only one queue, for now */
	return 0;

	index = ring->index;
	if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
		index = 0;
	return idx_to_prio[index];
}
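/*
 * The two mappings above are inverses of each other: mac80211 priority 0
 * (highest) maps to tx_ring3, and ring index 3 maps back to priority 0
 * through idx_to_prio[3] == 0, and so on down the table.
 */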
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
static inline dma_addr_t map_descbuffer(struct b43_dmaring *ring,
					unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}
static inline void unmap_descbuffer(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}
static inline void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
					   dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}
static inline void sync_descbuffer_for_device(struct b43_dmaring *ring,
					      dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}
static inline void free_descriptor_buffer(struct b43_dmaring *ring,
					  struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
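/*
 * Rough sizing, for orientation (the exact B43_DMA_RINGMEMSIZE constant
 * lives in dma.h): a 32-bit descriptor is 8 bytes (control + address) and
 * a 64-bit descriptor is 16 bytes (two control words plus
 * address_low/address_high), so a 4 KiB ring holds 512 or 256 descriptors
 * respectively.
 */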
static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;

	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
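/*
 * Illustrative example: on a B43_DMA_30BIT core, a buffer whose mapping
 * ends past bus address 0x40000000 (the 1 GiB mark) is out of range; the
 * check above undoes the mapping and the callers fall back to a GFP_DMA
 * allocation in low memory.
 */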
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
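/*
 * How the probe above works: the core advertises 64-bit capability in
 * SSB_TMSHIGH. Failing that, the addrext bits of the first 32-bit TX
 * control register are written and read back; if they stick, the engine
 * can extend addresses to a full 32 bits, otherwise only 30 bits are
 * usable and buffers must come from the low 1 GiB.
 */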
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
	       (unsigned int)(ring->type),
	       ring->mmio_base,
	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	b43_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	switch (dmamask) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case DMA_30BIT_MASK:
		type = B43_DMA_30BIT;
		break;
	case DMA_32BIT_MASK:
		type = B43_DMA_32BIT;
		break;
	case DMA_64BIT_MASK:
		type = B43_DMA_64BIT;
		break;
	}
	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required DMA mask (0x%08X%08X)\n",
		       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
		       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
		return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0x1000;
		break;
	case 1:
		cookie = 0x2000;
		break;
	case 2:
		cookie = 0x3000;
		break;
	case 3:
		cookie = 0x4000;
		break;
	case 4:
		cookie = 0x5000;
		break;
	case 5:
		cookie = 0x6000;
		break;
	default:
		B43_WARN_ON(1);
	}
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16) slot;

	return cookie;
}
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring0;
		break;
	case 0x2000:
		ring = dma->tx_ring1;
		break;
	case 0x3000:
		ring = dma->tx_ring2;
		break;
	case 0x4000:
		ring = dma->tx_ring3;
		break;
	case 0x5000:
		ring = dma->tx_ring4;
		break;
	case 0x6000:
		ring = dma->tx_ring5;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
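/*
 * Worked example of the cookie round trip: a frame queued on tx_ring2
 * (ring index 2) in slot 42 gets cookie 0x3000 | 42 = 0x302A. On TX
 * status, parse_cookie() masks 0x302A & 0xF000 = 0x3000 to find tx_ring2
 * and 0x302A & 0x0FFF = 42 to recover the slot.
 */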
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
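/*
 * Frame layout on the ring, as set up above: each packet consumes
 * SLOTS_PER_PACKET (2) slots. Slot N carries the cached TX header
 * (fill_descriptor with start=1, end=0, irq=0) and slot N+1 carries the
 * skb payload (start=0, end=1, irq=1), so the IRQ fires once per frame.
 */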
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring4;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = priority_to_txring(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(10);
			barrier();
		}
		b43_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}
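/*
 * Buffer layout handled by the skb_put()/skb_pull() pair above: the device
 * writes ring->frameoffset bytes of RX header followed by the len bytes of
 * the actual frame, so the skb is first grown to cover both and the header
 * is then pulled off, leaving exactly len bytes of 802.11 frame for
 * b43_rx().
 */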
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}
void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43_power_saving_ctl_bits(dev, 0);
}