/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING. If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

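/* For example, a frame occupies slot N for the device TX header and
 * slot N + 1 for the ieee80211 frame body, so a ring with nr_slots
 * entries carries at most nr_slots / TX_SLOTS_PER_FRAME frames in
 * flight at any time. */
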
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}

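/* An illustrative example, assuming the SSB translation field sits in
 * the top two address bits (SSB_DMA_TRANSLATION_MASK == 0xC0000000):
 * for dmaaddr == 0x1FF00000, dma->translation == 0x80000000 and
 * translation_in_low set, B43_DMA_ADDR_LOW yields
 * (0x1FF00000 & ~0xC0000000) | 0x80000000 == 0x9FF00000, and
 * B43_DMA_ADDR_EXT yields the masked-out top bits shifted down. */
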
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

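/* Example: a TX ring starts with current_slot == -1, so the first
 * request_slot() call below returns next_slot(ring, -1) == 0, and the
 * index wraps from nr_slots - 1 back to 0 as the ring is reused. */
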
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in a DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots per ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, ring_mem_size);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

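/* Usage sketch: callers pair this check with map_descbuffer(), which
 * may hand back an address the engine cannot reach (e.g. a buffer
 * ending above the 1 GiB boundary on a 30-bit engine):
 *
 *	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
 *	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0))
 *		retry the allocation with GFP_DMA, or give up;
 *
 * If the address range check fails, the helper has already unmapped
 * the buffer itself. */
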
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

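/* The poison pattern makes silent DMA failures detectable: frame_len is
 * zeroed and the first sizeof(struct b43_plcp_hdr6) + 2 == 8 frame bytes
 * are set to 0xFF. b43_rx_buffer_is_poisoned() ANDs those eight bytes,
 * which only stays 0xFF if the device never wrote the buffer; a real
 * frame would have to carry eight 0xFF bytes at exactly this offset to
 * be misdetected, which is unlikely for a valid PLCP header. */
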
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

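/* The mapping above, with DMA_BIT_MASK(n) == (1ULL << n) - 1:
 *
 *	DMA_BIT_MASK(30) == 0x000000003FFFFFFF -> B43_DMA_30BIT
 *	DMA_BIT_MASK(32) == 0x00000000FFFFFFFF -> B43_DMA_32BIT
 *	DMA_BIT_MASK(64) == 0xFFFFFFFFFFFFFFFF -> B43_DMA_64BIT
 */
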
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	/* Poison the skb pointer of every slot, not just the first one. */
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

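/* do_div(n, base) divides the 64-bit n in place and returns the
 * remainder, so divide() evaluates to the quotient left in __a and
 * modulo() to the remainder returned by do_div() itself. For example,
 * divide(1234, 100) == 12 and modulo(1234, 100) == 34. */
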
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

/* Some hardware with 64-bit DMA seems to be buggy and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return 1;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(dev->dev->sdev->bus->host_pci->is_pcie &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return 1;
#endif
	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

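/* Example: the ring with index 2 and slot 5 yields
 * cookie == ((2 + 1) << 12) | 5 == 0x3005; parse_cookie() below undoes
 * this by switching on (cookie & 0xF000) and masking the low 12 bits. */
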
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			    b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}