/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}

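/*
 * Editorial worked example (not driver code), assuming the standard SSB
 * constants SSB_DMA_TRANSLATION_MASK == 0xC0000000 and
 * SSB_DMA_TRANSLATION_SHIFT == 30: with dma->translation == 0x40000000
 * and translation_in_low set, a bus address of 0x1F000000 yields
 *
 *	B43_DMA_ADDR_LOW: (0x1F000000 & ~0xC0000000) | 0x40000000 == 0x5F000000
 *	B43_DMA_ADDR_EXT: (0x1F000000 & 0xC0000000) >> 30 == 0
 *
 * i.e. the translation bits are ORed into the address word, while the
 * original top bits travel separately in the descriptor's address
 * extension field.
 */
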
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

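/*
 * Editorial sketch (not driver code): callers never touch the 32/64-bit
 * register layouts directly. A ring is bound to exactly one of the two
 * ops tables at setup time, and the TX/RX paths go through the
 * indirection, e.g.:
 *
 *	struct b43_dmadesc_meta *meta;
 *	struct b43_dmadesc_generic *desc;
 *
 *	desc = ring->ops->idx2desc(ring, slot, &meta);
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 0, 1, 1);
 *
 * so the same code drives both descriptor formats.
 */
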
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in the DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for a ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, ring_mem_size);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

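/*
 * Editorial example (not driver code): on a B43_DMA_30BIT engine a
 * mapping at bus address 0x3FFFF000 with a 0x2000-byte buffer fails the
 * check above, because 0x3FFFF000 + 0x2000 > (1ULL << 30). The mapping
 * is undone here and the caller retries with GFP_DMA memory or a bounce
 * buffer that is guaranteed to sit below the limit.
 */
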
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

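/*
 * Sketch of the poison scheme (editorial; assumes the 6-byte
 * b43_plcp_hdr6 plus 2 padding bytes, i.e. 8 poisoned bytes): the bytes
 * directly after frameoffset are set to 0xFF before the buffer is handed
 * to the device. A real received frame starts with a PLCP header that is
 * never all-0xFF, so if the eight bytes still AND together to 0xFF on
 * completion, the device never wrote to the buffer and the frame is
 * dropped as a DMA failure.
 */
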
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

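/*
 * Usage sketch (editorial): do_div() divides a u64 in place and returns
 * the remainder, so these wrappers make quotient and remainder usable
 * inside expressions. E.g. with failed_packets == 3 and nr_packets == 250
 * in the statistics below:
 *
 *	permille_failed = divide(3 * 1000, 250);	== 12
 *	divide(permille_failed, 10);			== 1
 *	modulo(permille_failed, 10);			== 2
 *
 * which is printed as "1.2%".
 */
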
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return 1;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return 1;
#endif
	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It also must not be 0xFFFF, because that value
	 * is special for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
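
/*
 * Worked example (editorial): for ring->index == 2 (tx_ring_AC_VI) and
 * slot == 0x01A, generate_cookie() returns ((2 + 1) << 12) | 0x01A ==
 * 0x301A. parse_cookie() below inverts this: 0x301A & 0xF000 == 0x3000
 * selects tx_ring_AC_VI, and 0x301A & 0x0FFF recovers slot 0x01A.
 */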

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

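/*
 * Recovery example (editorial): with firstused == 10, a status report
 * whose cookie decodes to slot 12 == next_slot(next_slot(10)) means
 * exactly one header/data pair (slots 10 and 11) lost its TX status.
 * The loop above then starts at slot 10 with skip == 2 and reports the
 * zeroed "fake" b43_txstatus for the skipped frame, so mac80211 sees it
 * as not acknowledged instead of the skb being leaked.
 */
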
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}