/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>
/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2
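/*
 * Illustrative consequence (not stated elsewhere in this file): a TX ring
 * with nr_slots slots can therefore hold at most nr_slots / 2 frames in
 * flight, one slot for the hardware TX header and one for the 802.11
 * frame body.
 */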
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}
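/*
 * Illustrative example (values assumed): with SSB_DMA_TRANSLATION_MASK
 * == 0xC0000000, SSB_DMA_TRANSLATION_SHIFT == 30, dma->translation ==
 * 0x40000000 and translation_in_low set, a buffer at bus address
 * 0x01234567 produces B43_DMA_ADDR_LOW == 0x41234567 (top two bits
 * replaced by the routing code) and B43_DMA_ADDR_EXT == 0 (the
 * address's own top two bits, shifted down for the ADDREXT field).
 */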
static struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
						 int slot,
						 struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
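/*
 * Layout summary, derived from the code above: a 32-bit descriptor is two
 * little-endian words. "control" packs the byte count, the FRAMESTART/
 * FRAMEEND/IRQ flags, DTABLEEND on the ring's last slot, and the ADDREXT
 * routing bits; "address" holds the translated low address word.
 */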
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
static struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
						 int slot,
						 struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
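/*
 * Layout summary, derived from the code above: a 64-bit descriptor is
 * four little-endian words. control0 carries the frame/IRQ/table-end
 * flags, control1 the byte count plus ADDREXT bits, and address_low/
 * address_high the two halves of the (translated) bus address.
 */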
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}
static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
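/*
 * Illustration: on a 256-slot ring, next_slot(ring, 255) wraps to 0 and
 * prev_slot(ring, 0) wraps to 255, so slot indices always stay in
 * [0, nr_slots - 1]. next_slot() also accepts -1, the "empty TX ring"
 * value of ring->current_slot.
 */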
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
static dma_addr_t map_descbuffer(struct b43_dmaring *ring,
				 unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}
static void unmap_descbuffer(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}
static void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}
static void sync_descbuffer_for_device(struct b43_dmaring *ring,
				       dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}
static void free_descriptor_buffer(struct b43_dmaring *ring,
				   struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
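/*
 * Illustration (assumed numbers): a 30-bit engine can only reach the
 * first 1 GiB of bus address space, so a 0x2000 byte buffer mapped at
 * 0x3FFFF000 would end beyond 1ULL << 30 and is rejected and unmapped
 * here; callers then retry the allocation from GFP_DMA memory.
 */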
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
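/*
 * How the poison is used (derived from the two helpers above): the frame
 * length is zeroed and the first PLCP-header bytes are set to 0xFF. A
 * received frame overwrites both, so b43_rx_buffer_is_poisoned() only
 * sees 0xFF in all eight bytes if the device never touched the buffer.
 */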
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
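/*
 * How this probe works (derived from the code above): if the core
 * advertises 64-bit DMA in SSB_TMSHIGH, use a 64-bit mask. Otherwise the
 * ADDREXT mask bits are written to the first 32-bit TX control register;
 * if they read back set, the engine understands address extensions
 * (32-bit DMA), else it is limited to 30-bit addressing.
 */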
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	/* Poison the skb pointer of every slot (indexing each slot). */
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots /
						    TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
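/*
 * Usage note (illustrative): do_div() divides the 64-bit dividend in
 * place and returns the remainder, so divide(2500, 1000) evaluates to 2
 * and modulo(2500, 1000) to 500, without pulling a 64-bit division
 * libcall into 32-bit builds.
 */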
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(dev->dev->sdev->bus->host_pci->is_pcie &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return true;
#endif
	return false;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
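/*
 * Worked example (illustrative): for ring->index == 2 (the AC_VI TX
 * ring) and slot 10, the cookie is ((2 + 1) << 12) | 10 == 0x300A.
 * parse_cookie() below inverts this mapping.
 */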
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
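/*
 * Note, derived from the function above: each frame consumes exactly two
 * descriptors - the TX header slot is filled with start=1, end=0, irq=0
 * and the body slot with start=0, end=1, irq=1 - which is why
 * TX_SLOTS_PER_FRAME is defined as 2.
 */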
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
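/*
 * Mapping summary (derived from the switch above): mac80211 queue 0 maps
 * to AC_VO, 1 to AC_VI, 2 to AC_BE and 3 to AC_BK; with QoS disabled
 * every frame goes to the best-effort ring.
 */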
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = false;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}
void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}
/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}