drivers/net/wireless/b43/dma.c
1 /*
2
3 Broadcom B43 wireless driver
4
5 DMA ringbuffer and descriptor allocation/management
6
7 Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
8
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
27
28 */
29
30 #include "b43.h"
31 #include "dma.h"
32 #include "main.h"
33 #include "debugfs.h"
34 #include "xmit.h"
35
36 #include <linux/dma-mapping.h>
37 #include <linux/pci.h>
38 #include <linux/delay.h>
39 #include <linux/skbuff.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <asm/div64.h>
43
44
45 /* Required number of TX DMA slots per TX frame.
46 * This currently is 2, because we put the header and the ieee80211 frame
47 * into separate slots. */
48 #define TX_SLOTS_PER_FRAME 2
49
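/* For illustration only: with two descriptor slots per frame (one for the
 * header, one for the body), a ring can carry at most
 * nr_slots / TX_SLOTS_PER_FRAME frames in flight. A hypothetical helper
 * (not part of the original driver) making that explicit:
 */
static inline int b43_max_frames_per_ring(int nr_slots)
{
	return nr_slots / TX_SLOTS_PER_FRAME;
}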
50 static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
51 enum b43_addrtype addrtype)
52 {
53 u32 uninitialized_var(addr);
54
55 switch (addrtype) {
56 case B43_DMA_ADDR_LOW:
57 addr = lower_32_bits(dmaaddr);
58 if (dma->translation_in_low) {
59 addr &= ~SSB_DMA_TRANSLATION_MASK;
60 addr |= dma->translation;
61 }
62 break;
63 case B43_DMA_ADDR_HIGH:
64 addr = upper_32_bits(dmaaddr);
65 if (!dma->translation_in_low) {
66 addr &= ~SSB_DMA_TRANSLATION_MASK;
67 addr |= dma->translation;
68 }
69 break;
70 case B43_DMA_ADDR_EXT:
71 if (dma->translation_in_low)
72 addr = lower_32_bits(dmaaddr);
73 else
74 addr = upper_32_bits(dmaaddr);
75 addr &= SSB_DMA_TRANSLATION_MASK;
76 addr >>= SSB_DMA_TRANSLATION_SHIFT;
77 break;
78 }
79
80 return addr;
81 }
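/* Worked example (illustrative, assuming SSB_DMA_TRANSLATION_MASK ==
 * 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT == 30): on a core with
 * translation value 0x80000000 in the low word, dmaaddr 0x12345678 maps as
 *
 *	B43_DMA_ADDR_LOW:  (0x12345678 & ~0xC0000000) | 0x80000000 = 0x92345678
 *	B43_DMA_ADDR_HIGH: 0x00000000 (translation not applied to the high word)
 *	B43_DMA_ADDR_EXT:  (0x12345678 & 0xC0000000) >> 30 = 0
 */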
82
83 /* 32bit DMA ops. */
84 static
85 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
86 int slot,
87 struct b43_dmadesc_meta **meta)
88 {
89 struct b43_dmadesc32 *desc;
90
91 *meta = &(ring->meta[slot]);
92 desc = ring->descbase;
93 desc = &(desc[slot]);
94
95 return (struct b43_dmadesc_generic *)desc;
96 }
97
98 static void op32_fill_descriptor(struct b43_dmaring *ring,
99 struct b43_dmadesc_generic *desc,
100 dma_addr_t dmaaddr, u16 bufsize,
101 int start, int end, int irq)
102 {
103 struct b43_dmadesc32 *descbase = ring->descbase;
104 int slot;
105 u32 ctl;
106 u32 addr;
107 u32 addrext;
108
109 slot = (int)(&(desc->dma32) - descbase);
110 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
111
112 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
113 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
114
115 ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
116 if (slot == ring->nr_slots - 1)
117 ctl |= B43_DMA32_DCTL_DTABLEEND;
118 if (start)
119 ctl |= B43_DMA32_DCTL_FRAMESTART;
120 if (end)
121 ctl |= B43_DMA32_DCTL_FRAMEEND;
122 if (irq)
123 ctl |= B43_DMA32_DCTL_IRQ;
124 ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
125 & B43_DMA32_DCTL_ADDREXT_MASK;
126
127 desc->dma32.control = cpu_to_le32(ctl);
128 desc->dma32.address = cpu_to_le32(addr);
129 }
130
131 static void op32_poke_tx(struct b43_dmaring *ring, int slot)
132 {
133 b43_dma_write(ring, B43_DMA32_TXINDEX,
134 (u32) (slot * sizeof(struct b43_dmadesc32)));
135 }
136
137 static void op32_tx_suspend(struct b43_dmaring *ring)
138 {
139 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
140 | B43_DMA32_TXSUSPEND);
141 }
142
143 static void op32_tx_resume(struct b43_dmaring *ring)
144 {
145 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
146 & ~B43_DMA32_TXSUSPEND);
147 }
148
149 static int op32_get_current_rxslot(struct b43_dmaring *ring)
150 {
151 u32 val;
152
153 val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
154 val &= B43_DMA32_RXDPTR;
155
156 return (val / sizeof(struct b43_dmadesc32));
157 }
158
159 static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
160 {
161 b43_dma_write(ring, B43_DMA32_RXINDEX,
162 (u32) (slot * sizeof(struct b43_dmadesc32)));
163 }
164
165 static const struct b43_dma_ops dma32_ops = {
166 .idx2desc = op32_idx2desc,
167 .fill_descriptor = op32_fill_descriptor,
168 .poke_tx = op32_poke_tx,
169 .tx_suspend = op32_tx_suspend,
170 .tx_resume = op32_tx_resume,
171 .get_current_rxslot = op32_get_current_rxslot,
172 .set_current_rxslot = op32_set_current_rxslot,
173 };
174
175 /* 64bit DMA ops. */
176 static
177 struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
178 int slot,
179 struct b43_dmadesc_meta **meta)
180 {
181 struct b43_dmadesc64 *desc;
182
183 *meta = &(ring->meta[slot]);
184 desc = ring->descbase;
185 desc = &(desc[slot]);
186
187 return (struct b43_dmadesc_generic *)desc;
188 }
189
190 static void op64_fill_descriptor(struct b43_dmaring *ring,
191 struct b43_dmadesc_generic *desc,
192 dma_addr_t dmaaddr, u16 bufsize,
193 int start, int end, int irq)
194 {
195 struct b43_dmadesc64 *descbase = ring->descbase;
196 int slot;
197 u32 ctl0 = 0, ctl1 = 0;
198 u32 addrlo, addrhi;
199 u32 addrext;
200
201 slot = (int)(&(desc->dma64) - descbase);
202 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
203
204 addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
205 addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
206 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
207
208 if (slot == ring->nr_slots - 1)
209 ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
210 if (start)
211 ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
212 if (end)
213 ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
214 if (irq)
215 ctl0 |= B43_DMA64_DCTL0_IRQ;
216 ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
217 ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
218 & B43_DMA64_DCTL1_ADDREXT_MASK;
219
220 desc->dma64.control0 = cpu_to_le32(ctl0);
221 desc->dma64.control1 = cpu_to_le32(ctl1);
222 desc->dma64.address_low = cpu_to_le32(addrlo);
223 desc->dma64.address_high = cpu_to_le32(addrhi);
224 }
225
226 static void op64_poke_tx(struct b43_dmaring *ring, int slot)
227 {
228 b43_dma_write(ring, B43_DMA64_TXINDEX,
229 (u32) (slot * sizeof(struct b43_dmadesc64)));
230 }
231
232 static void op64_tx_suspend(struct b43_dmaring *ring)
233 {
234 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
235 | B43_DMA64_TXSUSPEND);
236 }
237
238 static void op64_tx_resume(struct b43_dmaring *ring)
239 {
240 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
241 & ~B43_DMA64_TXSUSPEND);
242 }
243
244 static int op64_get_current_rxslot(struct b43_dmaring *ring)
245 {
246 u32 val;
247
248 val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
249 val &= B43_DMA64_RXSTATDPTR;
250
251 return (val / sizeof(struct b43_dmadesc64));
252 }
253
254 static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
255 {
256 b43_dma_write(ring, B43_DMA64_RXINDEX,
257 (u32) (slot * sizeof(struct b43_dmadesc64)));
258 }
259
260 static const struct b43_dma_ops dma64_ops = {
261 .idx2desc = op64_idx2desc,
262 .fill_descriptor = op64_fill_descriptor,
263 .poke_tx = op64_poke_tx,
264 .tx_suspend = op64_tx_suspend,
265 .tx_resume = op64_tx_resume,
266 .get_current_rxslot = op64_get_current_rxslot,
267 .set_current_rxslot = op64_set_current_rxslot,
268 };
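/* Note: the two ops tables above give the rest of this file a uniform
 * interface to either register layout; callers simply do, e.g.,
 *
 *	ring->ops->poke_tx(ring, next_slot(ring, slot));
 *
 * and the 32-bit vs. 64-bit variant is resolved once at ring setup. */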
269
270 static inline int free_slots(struct b43_dmaring *ring)
271 {
272 return (ring->nr_slots - ring->used_slots);
273 }
274
275 static inline int next_slot(struct b43_dmaring *ring, int slot)
276 {
277 B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
278 if (slot == ring->nr_slots - 1)
279 return 0;
280 return slot + 1;
281 }
282
283 static inline int prev_slot(struct b43_dmaring *ring, int slot)
284 {
285 B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
286 if (slot == 0)
287 return ring->nr_slots - 1;
288 return slot - 1;
289 }
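/* Note: the slot indices wrap around the ring; e.g. with nr_slots == 256,
 * next_slot(ring, 255) == 0 and prev_slot(ring, 0) == 255. */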
290
291 #ifdef CONFIG_B43_DEBUG
292 static void update_max_used_slots(struct b43_dmaring *ring,
293 int current_used_slots)
294 {
295 if (current_used_slots <= ring->max_used_slots)
296 return;
297 ring->max_used_slots = current_used_slots;
298 if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
299 b43dbg(ring->dev->wl,
300 "max_used_slots increased to %d on %s ring %d\n",
301 ring->max_used_slots,
302 ring->tx ? "TX" : "RX", ring->index);
303 }
304 }
305 #else
306 static inline
307 void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
308 {
309 }
310 #endif /* DEBUG */
311
312 /* Request a slot for usage. */
313 static inline int request_slot(struct b43_dmaring *ring)
314 {
315 int slot;
316
317 B43_WARN_ON(!ring->tx);
318 B43_WARN_ON(ring->stopped);
319 B43_WARN_ON(free_slots(ring) == 0);
320
321 slot = next_slot(ring, ring->current_slot);
322 ring->current_slot = slot;
323 ring->used_slots++;
324
325 update_max_used_slots(ring, ring->used_slots);
326
327 return slot;
328 }
329
330 static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
331 {
332 static const u16 map64[] = {
333 B43_MMIO_DMA64_BASE0,
334 B43_MMIO_DMA64_BASE1,
335 B43_MMIO_DMA64_BASE2,
336 B43_MMIO_DMA64_BASE3,
337 B43_MMIO_DMA64_BASE4,
338 B43_MMIO_DMA64_BASE5,
339 };
340 static const u16 map32[] = {
341 B43_MMIO_DMA32_BASE0,
342 B43_MMIO_DMA32_BASE1,
343 B43_MMIO_DMA32_BASE2,
344 B43_MMIO_DMA32_BASE3,
345 B43_MMIO_DMA32_BASE4,
346 B43_MMIO_DMA32_BASE5,
347 };
348
349 if (type == B43_DMA_64BIT) {
350 B43_WARN_ON(!(controller_idx >= 0 &&
351 controller_idx < ARRAY_SIZE(map64)));
352 return map64[controller_idx];
353 }
354 B43_WARN_ON(!(controller_idx >= 0 &&
355 controller_idx < ARRAY_SIZE(map32)));
356 return map32[controller_idx];
357 }
358
359 static inline
360 dma_addr_t map_descbuffer(struct b43_dmaring *ring,
361 unsigned char *buf, size_t len, int tx)
362 {
363 dma_addr_t dmaaddr;
364
365 if (tx) {
366 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
367 buf, len, DMA_TO_DEVICE);
368 } else {
369 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
370 buf, len, DMA_FROM_DEVICE);
371 }
372
373 return dmaaddr;
374 }
375
376 static inline
377 void unmap_descbuffer(struct b43_dmaring *ring,
378 dma_addr_t addr, size_t len, int tx)
379 {
380 if (tx) {
381 dma_unmap_single(ring->dev->dev->dma_dev,
382 addr, len, DMA_TO_DEVICE);
383 } else {
384 dma_unmap_single(ring->dev->dev->dma_dev,
385 addr, len, DMA_FROM_DEVICE);
386 }
387 }
388
389 static inline
390 void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
391 dma_addr_t addr, size_t len)
392 {
393 B43_WARN_ON(ring->tx);
394 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
395 addr, len, DMA_FROM_DEVICE);
396 }
397
398 static inline
399 void sync_descbuffer_for_device(struct b43_dmaring *ring,
400 dma_addr_t addr, size_t len)
401 {
402 B43_WARN_ON(ring->tx);
403 dma_sync_single_for_device(ring->dev->dev->dma_dev,
404 addr, len, DMA_FROM_DEVICE);
405 }
406
407 static inline
408 void free_descriptor_buffer(struct b43_dmaring *ring,
409 struct b43_dmadesc_meta *meta)
410 {
411 if (meta->skb) {
412 dev_kfree_skb_any(meta->skb);
413 meta->skb = NULL;
414 }
415 }
416
417 static int alloc_ringmemory(struct b43_dmaring *ring)
418 {
419 gfp_t flags = GFP_KERNEL;
420
421 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
422 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
423 * In practice we could use smaller buffers for the latter, but the
424 * alignment is really important because of a hardware bug: if bit
425 * 0x00001000 is set in the DMA address, some hardware (like the BCM4331)
426 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
427 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
428 * more than 256 slots for a ring.
429 */
430 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
431 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
432
433 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
434 ring_mem_size, &(ring->dmabase),
435 flags);
436 if (!ring->descbase) {
437 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
438 return -ENOMEM;
439 }
440 memset(ring->descbase, 0, ring_mem_size);
441
442 return 0;
443 }
444
445 static void free_ringmemory(struct b43_dmaring *ring)
446 {
447 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
448 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
449 dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
450 ring->descbase, ring->dmabase);
451 }
452
453 /* Reset the RX DMA channel */
454 static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
455 enum b43_dmatype type)
456 {
457 int i;
458 u32 value;
459 u16 offset;
460
461 might_sleep();
462
463 offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
464 b43_write32(dev, mmio_base + offset, 0);
465 for (i = 0; i < 10; i++) {
466 offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
467 B43_DMA32_RXSTATUS;
468 value = b43_read32(dev, mmio_base + offset);
469 if (type == B43_DMA_64BIT) {
470 value &= B43_DMA64_RXSTAT;
471 if (value == B43_DMA64_RXSTAT_DISABLED) {
472 i = -1;
473 break;
474 }
475 } else {
476 value &= B43_DMA32_RXSTATE;
477 if (value == B43_DMA32_RXSTAT_DISABLED) {
478 i = -1;
479 break;
480 }
481 }
482 msleep(1);
483 }
484 if (i != -1) {
485 b43err(dev->wl, "DMA RX reset timed out\n");
486 return -ENODEV;
487 }
488
489 return 0;
490 }
491
492 /* Reset the TX DMA channel */
493 static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
494 enum b43_dmatype type)
495 {
496 int i;
497 u32 value;
498 u16 offset;
499
500 might_sleep();
501
502 for (i = 0; i < 10; i++) {
503 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
504 B43_DMA32_TXSTATUS;
505 value = b43_read32(dev, mmio_base + offset);
506 if (type == B43_DMA_64BIT) {
507 value &= B43_DMA64_TXSTAT;
508 if (value == B43_DMA64_TXSTAT_DISABLED ||
509 value == B43_DMA64_TXSTAT_IDLEWAIT ||
510 value == B43_DMA64_TXSTAT_STOPPED)
511 break;
512 } else {
513 value &= B43_DMA32_TXSTATE;
514 if (value == B43_DMA32_TXSTAT_DISABLED ||
515 value == B43_DMA32_TXSTAT_IDLEWAIT ||
516 value == B43_DMA32_TXSTAT_STOPPED)
517 break;
518 }
519 msleep(1);
520 }
521 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
522 b43_write32(dev, mmio_base + offset, 0);
523 for (i = 0; i < 10; i++) {
524 offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
525 B43_DMA32_TXSTATUS;
526 value = b43_read32(dev, mmio_base + offset);
527 if (type == B43_DMA_64BIT) {
528 value &= B43_DMA64_TXSTAT;
529 if (value == B43_DMA64_TXSTAT_DISABLED) {
530 i = -1;
531 break;
532 }
533 } else {
534 value &= B43_DMA32_TXSTATE;
535 if (value == B43_DMA32_TXSTAT_DISABLED) {
536 i = -1;
537 break;
538 }
539 }
540 msleep(1);
541 }
542 if (i != -1) {
543 b43err(dev->wl, "DMA TX reset timed out\n");
544 return -ENODEV;
545 }
546 /* ensure the reset is completed. */
547 msleep(1);
548
549 return 0;
550 }
551
552 /* Check if a DMA mapping address is invalid. */
553 static bool b43_dma_mapping_error(struct b43_dmaring *ring,
554 dma_addr_t addr,
555 size_t buffersize, bool dma_to_device)
556 {
557 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
558 return 1;
559
560 switch (ring->type) {
561 case B43_DMA_30BIT:
562 if ((u64)addr + buffersize > (1ULL << 30))
563 goto address_error;
564 break;
565 case B43_DMA_32BIT:
566 if ((u64)addr + buffersize > (1ULL << 32))
567 goto address_error;
568 break;
569 case B43_DMA_64BIT:
570 /* Currently we can't have addresses beyond
571 * 64 bits in the kernel. */
572 break;
573 }
574
575 /* The address is OK. */
576 return 0;
577
578 address_error:
579 /* We can't support this address. Unmap it again. */
580 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
581
582 return 1;
583 }
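/* Note: the range checks above enforce the DMA engine's addressing limit,
 * not the validity of the kernel mapping. For example, on a 30-bit engine a
 * buffer mapped at 0x3FFFF000 with size 0x2000 ends at 0x40001000, which is
 * beyond 1 << 30, so it must be rejected and remapped by the caller. */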
584
585 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
586 {
587 unsigned char *f = skb->data + ring->frameoffset;
588
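	/* The byte-wise AND equals 0xFF only if all eight poison bytes
	 * still read 0xFF, i.e. the device never wrote into the buffer. */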
589 return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
590 }
591
592 static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
593 {
594 struct b43_rxhdr_fw4 *rxhdr;
595 unsigned char *frame;
596
597 /* This poisons the RX buffer to detect DMA failures. */
598
599 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
600 rxhdr->frame_len = 0;
601
602 B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
603 frame = skb->data + ring->frameoffset;
604 memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
605 }
606
607 static int setup_rx_descbuffer(struct b43_dmaring *ring,
608 struct b43_dmadesc_generic *desc,
609 struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
610 {
611 dma_addr_t dmaaddr;
612 struct sk_buff *skb;
613
614 B43_WARN_ON(ring->tx);
615
616 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
617 if (unlikely(!skb))
618 return -ENOMEM;
619 b43_poison_rx_buffer(ring, skb);
620 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
621 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
622 /* ugh. try to realloc in zone_dma */
623 gfp_flags |= GFP_DMA;
624
625 dev_kfree_skb_any(skb);
626
627 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
628 if (unlikely(!skb))
629 return -ENOMEM;
630 b43_poison_rx_buffer(ring, skb);
631 dmaaddr = map_descbuffer(ring, skb->data,
632 ring->rx_buffersize, 0);
633 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
634 b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
635 dev_kfree_skb_any(skb);
636 return -EIO;
637 }
638 }
639
640 meta->skb = skb;
641 meta->dmaaddr = dmaaddr;
642 ring->ops->fill_descriptor(ring, desc, dmaaddr,
643 ring->rx_buffersize, 0, 0, 0);
644
645 return 0;
646 }
647
648 /* Allocate the initial descbuffers.
649 * This is used for an RX ring only.
650 */
651 static int alloc_initial_descbuffers(struct b43_dmaring *ring)
652 {
653 int i, err = -ENOMEM;
654 struct b43_dmadesc_generic *desc;
655 struct b43_dmadesc_meta *meta;
656
657 for (i = 0; i < ring->nr_slots; i++) {
658 desc = ring->ops->idx2desc(ring, i, &meta);
659
660 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
661 if (err) {
662 b43err(ring->dev->wl,
663 "Failed to allocate initial descbuffers\n");
664 goto err_unwind;
665 }
666 }
667 mb();
668 ring->used_slots = ring->nr_slots;
669 err = 0;
670 out:
671 return err;
672
673 err_unwind:
674 for (i--; i >= 0; i--) {
675 desc = ring->ops->idx2desc(ring, i, &meta);
676
677 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
678 dev_kfree_skb(meta->skb);
679 }
680 goto out;
681 }
682
683 /* Do initial setup of the DMA controller.
684 * Reset the controller, write the ring's bus address
685 * and switch the "enable" bit on.
686 */
687 static int dmacontroller_setup(struct b43_dmaring *ring)
688 {
689 int err = 0;
690 u32 value;
691 u32 addrext;
692 bool parity = ring->dev->dma.parity;
693 u32 addrlo;
694 u32 addrhi;
695
696 if (ring->tx) {
697 if (ring->type == B43_DMA_64BIT) {
698 u64 ringbase = (u64) (ring->dmabase);
699 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
700 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
701 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
702
703 value = B43_DMA64_TXENABLE;
704 value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
705 & B43_DMA64_TXADDREXT_MASK;
706 if (!parity)
707 value |= B43_DMA64_TXPARITYDISABLE;
708 b43_dma_write(ring, B43_DMA64_TXCTL, value);
709 b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
710 b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
711 } else {
712 u32 ringbase = (u32) (ring->dmabase);
713 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
714 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
715
716 value = B43_DMA32_TXENABLE;
717 value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
718 & B43_DMA32_TXADDREXT_MASK;
719 if (!parity)
720 value |= B43_DMA32_TXPARITYDISABLE;
721 b43_dma_write(ring, B43_DMA32_TXCTL, value);
722 b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
723 }
724 } else {
725 err = alloc_initial_descbuffers(ring);
726 if (err)
727 goto out;
728 if (ring->type == B43_DMA_64BIT) {
729 u64 ringbase = (u64) (ring->dmabase);
730 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
731 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
732 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
733
734 value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
735 value |= B43_DMA64_RXENABLE;
736 value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
737 & B43_DMA64_RXADDREXT_MASK;
738 if (!parity)
739 value |= B43_DMA64_RXPARITYDISABLE;
740 b43_dma_write(ring, B43_DMA64_RXCTL, value);
741 b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
742 b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
743 b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
744 sizeof(struct b43_dmadesc64));
745 } else {
746 u32 ringbase = (u32) (ring->dmabase);
747 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
748 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
749
750 value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
751 value |= B43_DMA32_RXENABLE;
752 value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
753 & B43_DMA32_RXADDREXT_MASK;
754 if (!parity)
755 value |= B43_DMA32_RXPARITYDISABLE;
756 b43_dma_write(ring, B43_DMA32_RXCTL, value);
757 b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
758 b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
759 sizeof(struct b43_dmadesc32));
760 }
761 }
762
763 out:
764 return err;
765 }
766
767 /* Shut down the DMA controller. */
768 static void dmacontroller_cleanup(struct b43_dmaring *ring)
769 {
770 if (ring->tx) {
771 b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
772 ring->type);
773 if (ring->type == B43_DMA_64BIT) {
774 b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
775 b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
776 } else
777 b43_dma_write(ring, B43_DMA32_TXRING, 0);
778 } else {
779 b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
780 ring->type);
781 if (ring->type == B43_DMA_64BIT) {
782 b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
783 b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
784 } else
785 b43_dma_write(ring, B43_DMA32_RXRING, 0);
786 }
787 }
788
789 static void free_all_descbuffers(struct b43_dmaring *ring)
790 {
791 struct b43_dmadesc_meta *meta;
792 int i;
793
794 if (!ring->used_slots)
795 return;
796 for (i = 0; i < ring->nr_slots; i++) {
797 /* get meta - ignore returned value */
798 ring->ops->idx2desc(ring, i, &meta);
799
800 if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
801 B43_WARN_ON(!ring->tx);
802 continue;
803 }
804 if (ring->tx) {
805 unmap_descbuffer(ring, meta->dmaaddr,
806 meta->skb->len, 1);
807 } else {
808 unmap_descbuffer(ring, meta->dmaaddr,
809 ring->rx_buffersize, 0);
810 }
811 free_descriptor_buffer(ring, meta);
812 }
813 }
814
815 static u64 supported_dma_mask(struct b43_wldev *dev)
816 {
817 u32 tmp;
818 u16 mmio_base;
819
820 switch (dev->dev->bus_type) {
821 #ifdef CONFIG_B43_BCMA
822 case B43_BUS_BCMA:
823 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
824 if (tmp & BCMA_IOST_DMA64)
825 return DMA_BIT_MASK(64);
826 break;
827 #endif
828 #ifdef CONFIG_B43_SSB
829 case B43_BUS_SSB:
830 tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
831 if (tmp & SSB_TMSHIGH_DMA64)
832 return DMA_BIT_MASK(64);
833 break;
834 #endif
835 }
836
837 mmio_base = b43_dmacontroller_base(0, 0);
838 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
839 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
840 if (tmp & B43_DMA32_TXADDREXT_MASK)
841 return DMA_BIT_MASK(32);
842
843 return DMA_BIT_MASK(30);
844 }
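/* Note on the fallback probe above: it writes the address-extension bits
 * into a TX control register and reads them back. Engines without 32-bit
 * addressing do not implement those bits, so the readback drops them and
 * only the 30-bit mask can be claimed. */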
845
846 static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
847 {
848 if (dmamask == DMA_BIT_MASK(30))
849 return B43_DMA_30BIT;
850 if (dmamask == DMA_BIT_MASK(32))
851 return B43_DMA_32BIT;
852 if (dmamask == DMA_BIT_MASK(64))
853 return B43_DMA_64BIT;
854 B43_WARN_ON(1);
855 return B43_DMA_30BIT;
856 }
857
858 /* Main initialization function. */
859 static
860 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
861 int controller_index,
862 int for_tx,
863 enum b43_dmatype type)
864 {
865 struct b43_dmaring *ring;
866 int i, err;
867 dma_addr_t dma_test;
868
869 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
870 if (!ring)
871 goto out;
872
873 ring->nr_slots = B43_RXRING_SLOTS;
874 if (for_tx)
875 ring->nr_slots = B43_TXRING_SLOTS;
876
877 ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
878 GFP_KERNEL);
879 if (!ring->meta)
880 goto err_kfree_ring;
881 for (i = 0; i < ring->nr_slots; i++)
882 ring->meta[i].skb = B43_DMA_PTR_POISON;
883
884 ring->type = type;
885 ring->dev = dev;
886 ring->mmio_base = b43_dmacontroller_base(type, controller_index);
887 ring->index = controller_index;
888 if (type == B43_DMA_64BIT)
889 ring->ops = &dma64_ops;
890 else
891 ring->ops = &dma32_ops;
892 if (for_tx) {
893 ring->tx = true;
894 ring->current_slot = -1;
895 } else {
896 if (ring->index == 0) {
897 switch (dev->fw.hdr_format) {
898 case B43_FW_HDR_598:
899 ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
900 ring->frameoffset = B43_DMA0_RX_FW598_FO;
901 break;
902 case B43_FW_HDR_410:
903 case B43_FW_HDR_351:
904 ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
905 ring->frameoffset = B43_DMA0_RX_FW351_FO;
906 break;
907 }
908 } else
909 B43_WARN_ON(1);
910 }
911 #ifdef CONFIG_B43_DEBUG
912 ring->last_injected_overflow = jiffies;
913 #endif
914
915 if (for_tx) {
916 /* Assumption: B43_TXRING_SLOTS is divisible by TX_SLOTS_PER_FRAME. */
917 BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);
918
919 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
920 b43_txhdr_size(dev),
921 GFP_KERNEL);
922 if (!ring->txhdr_cache)
923 goto err_kfree_meta;
924
925 /* test for ability to dma to txhdr_cache */
926 dma_test = dma_map_single(dev->dev->dma_dev,
927 ring->txhdr_cache,
928 b43_txhdr_size(dev),
929 DMA_TO_DEVICE);
930
931 if (b43_dma_mapping_error(ring, dma_test,
932 b43_txhdr_size(dev), 1)) {
933 /* ugh realloc */
934 kfree(ring->txhdr_cache);
935 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
936 b43_txhdr_size(dev),
937 GFP_KERNEL | GFP_DMA);
938 if (!ring->txhdr_cache)
939 goto err_kfree_meta;
940
941 dma_test = dma_map_single(dev->dev->dma_dev,
942 ring->txhdr_cache,
943 b43_txhdr_size(dev),
944 DMA_TO_DEVICE);
945
946 if (b43_dma_mapping_error(ring, dma_test,
947 b43_txhdr_size(dev), 1)) {
948
949 b43err(dev->wl,
950 "TXHDR DMA allocation failed\n");
951 goto err_kfree_txhdr_cache;
952 }
953 }
954
955 dma_unmap_single(dev->dev->dma_dev,
956 dma_test, b43_txhdr_size(dev),
957 DMA_TO_DEVICE);
958 }
959
960 err = alloc_ringmemory(ring);
961 if (err)
962 goto err_kfree_txhdr_cache;
963 err = dmacontroller_setup(ring);
964 if (err)
965 goto err_free_ringmemory;
966
967 out:
968 return ring;
969
970 err_free_ringmemory:
971 free_ringmemory(ring);
972 err_kfree_txhdr_cache:
973 kfree(ring->txhdr_cache);
974 err_kfree_meta:
975 kfree(ring->meta);
976 err_kfree_ring:
977 kfree(ring);
978 ring = NULL;
979 goto out;
980 }
981
982 #define divide(a, b) ({ \
983 typeof(a) __a = a; \
984 do_div(__a, b); \
985 __a; \
986 })
987
988 #define modulo(a, b) ({ \
989 typeof(a) __a = a; \
990 do_div(__a, b); \
991 })
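/* Note: do_div(n, base) divides the 64-bit n in place and returns the
 * 32-bit remainder, so divide() evaluates to the quotient and modulo() to
 * the remainder; e.g. divide(1005, 10) == 100 and modulo(1005, 10) == 5. */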
992
993 /* Main cleanup function. */
994 static void b43_destroy_dmaring(struct b43_dmaring *ring,
995 const char *ringname)
996 {
997 if (!ring)
998 return;
999
1000 #ifdef CONFIG_B43_DEBUG
1001 {
1002 /* Print some statistics. */
1003 u64 failed_packets = ring->nr_failed_tx_packets;
1004 u64 succeed_packets = ring->nr_succeed_tx_packets;
1005 u64 nr_packets = failed_packets + succeed_packets;
1006 u64 permille_failed = 0, average_tries = 0;
1007
1008 if (nr_packets)
1009 permille_failed = divide(failed_packets * 1000, nr_packets);
1010 if (nr_packets)
1011 average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
1012
1013 b43dbg(ring->dev->wl, "DMA-%u %s: "
1014 "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
1015 "Average tries %llu.%02llu\n",
1016 (unsigned int)(ring->type), ringname,
1017 ring->max_used_slots,
1018 ring->nr_slots,
1019 (unsigned long long)failed_packets,
1020 (unsigned long long)nr_packets,
1021 (unsigned long long)divide(permille_failed, 10),
1022 (unsigned long long)modulo(permille_failed, 10),
1023 (unsigned long long)divide(average_tries, 100),
1024 (unsigned long long)modulo(average_tries, 100));
1025 }
1026 #endif /* DEBUG */
1027
1028 /* Device IRQs are disabled prior to entering this function,
1029 * so there is no need to take care of concurrency with the RX handler.
1030 */
1031 dmacontroller_cleanup(ring);
1032 free_all_descbuffers(ring);
1033 free_ringmemory(ring);
1034
1035 kfree(ring->txhdr_cache);
1036 kfree(ring->meta);
1037 kfree(ring);
1038 }
1039
1040 #define destroy_ring(dma, ring) do { \
1041 b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
1042 (dma)->ring = NULL; \
1043 } while (0)
1044
1045 void b43_dma_free(struct b43_wldev *dev)
1046 {
1047 struct b43_dma *dma;
1048
1049 if (b43_using_pio_transfers(dev))
1050 return;
1051 dma = &dev->dma;
1052
1053 destroy_ring(dma, rx_ring);
1054 destroy_ring(dma, tx_ring_AC_BK);
1055 destroy_ring(dma, tx_ring_AC_BE);
1056 destroy_ring(dma, tx_ring_AC_VI);
1057 destroy_ring(dma, tx_ring_AC_VO);
1058 destroy_ring(dma, tx_ring_mcast);
1059 }
1060
1061 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
1062 {
1063 u64 orig_mask = mask;
1064 bool fallback = false;
1065 int err;
1066
1067 /* Try to set the DMA mask. If it fails, try falling back to a
1068 * lower mask, as we can always also support a lower one. */
1069 while (1) {
1070 err = dma_set_mask(dev->dev->dma_dev, mask);
1071 if (!err) {
1072 err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
1073 if (!err)
1074 break;
1075 }
1076 if (mask == DMA_BIT_MASK(64)) {
1077 mask = DMA_BIT_MASK(32);
1078 fallback = true;
1079 continue;
1080 }
1081 if (mask == DMA_BIT_MASK(32)) {
1082 mask = DMA_BIT_MASK(30);
1083 fallback = true;
1084 continue;
1085 }
1086 b43err(dev->wl, "The machine/kernel does not support "
1087 "the required %u-bit DMA mask\n",
1088 (unsigned int)dma_mask_to_engine_type(orig_mask));
1089 return -EOPNOTSUPP;
1090 }
1091 if (fallback) {
1092 b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
1093 (unsigned int)dma_mask_to_engine_type(orig_mask),
1094 (unsigned int)dma_mask_to_engine_type(mask));
1095 }
1096
1097 return 0;
1098 }
1099
1100 /* Some hardware with 64-bit DMA seems to be bugged and looks for the
1101 * translation bits in the low address word instead of the high one.
1102 */
1103 static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
1104 enum b43_dmatype type)
1105 {
1106 if (type != B43_DMA_64BIT)
1107 return 1;
1108
1109 #ifdef CONFIG_B43_SSB
1110 if (dev->dev->bus_type == B43_BUS_SSB &&
1111 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
1112 !(dev->dev->sdev->bus->host_pci->is_pcie &&
1113 ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
1114 return 1;
1115 #endif
1116 return 0;
1117 }
1118
1119 int b43_dma_init(struct b43_wldev *dev)
1120 {
1121 struct b43_dma *dma = &dev->dma;
1122 int err;
1123 u64 dmamask;
1124 enum b43_dmatype type;
1125
1126 dmamask = supported_dma_mask(dev);
1127 type = dma_mask_to_engine_type(dmamask);
1128 err = b43_dma_set_mask(dev, dmamask);
1129 if (err)
1130 return err;
1131
1132 switch (dev->dev->bus_type) {
1133 #ifdef CONFIG_B43_BCMA
1134 case B43_BUS_BCMA:
1135 dma->translation = bcma_core_dma_translation(dev->dev->bdev);
1136 break;
1137 #endif
1138 #ifdef CONFIG_B43_SSB
1139 case B43_BUS_SSB:
1140 dma->translation = ssb_dma_translation(dev->dev->sdev);
1141 break;
1142 #endif
1143 }
1144 dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);
1145
1146 dma->parity = true;
1147 #ifdef CONFIG_B43_BCMA
1148 /* TODO: find out which SSB devices need parity disabled */
1149 if (dev->dev->bus_type == B43_BUS_BCMA)
1150 dma->parity = false;
1151 #endif
1152
1153 err = -ENOMEM;
1154 /* setup TX DMA channels. */
1155 dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
1156 if (!dma->tx_ring_AC_BK)
1157 goto out;
1158
1159 dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
1160 if (!dma->tx_ring_AC_BE)
1161 goto err_destroy_bk;
1162
1163 dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
1164 if (!dma->tx_ring_AC_VI)
1165 goto err_destroy_be;
1166
1167 dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
1168 if (!dma->tx_ring_AC_VO)
1169 goto err_destroy_vi;
1170
1171 dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
1172 if (!dma->tx_ring_mcast)
1173 goto err_destroy_vo;
1174
1175 /* setup RX DMA channel. */
1176 dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
1177 if (!dma->rx_ring)
1178 goto err_destroy_mcast;
1179
1180 /* No support for the TX status DMA ring. */
1181 B43_WARN_ON(dev->dev->core_rev < 5);
1182
1183 b43dbg(dev->wl, "%u-bit DMA initialized\n",
1184 (unsigned int)type);
1185 err = 0;
1186 out:
1187 return err;
1188
1189 err_destroy_mcast:
1190 destroy_ring(dma, tx_ring_mcast);
1191 err_destroy_vo:
1192 destroy_ring(dma, tx_ring_AC_VO);
1193 err_destroy_vi:
1194 destroy_ring(dma, tx_ring_AC_VI);
1195 err_destroy_be:
1196 destroy_ring(dma, tx_ring_AC_BE);
1197 err_destroy_bk:
1198 destroy_ring(dma, tx_ring_AC_BK);
1199 return err;
1200 }
1201
1202 /* Generate a cookie for the TX header. */
1203 static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1204 {
1205 u16 cookie;
1206
1207 /* Use the upper 4 bits of the cookie as
1208 * DMA controller ID and store the slot number
1209 * in the lower 12 bits.
1210 * Note that the cookie must never be 0, as this
1211 * is a special value used in the RX path.
1212 * It can also not be 0xFFFF, because that value is
1213 * reserved for multicast frames.
1214 */
1215 cookie = (((u16)ring->index + 1) << 12);
1216 B43_WARN_ON(slot & ~0x0FFF);
1217 cookie |= (u16)slot;
1218
1219 return cookie;
1220 }
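/* Illustration of the cookie layout described above:
 *
 *	 15    12 11             0
 *	+--------+----------------+
 *	| ring+1 |      slot      |
 *	+--------+----------------+
 *
 * e.g. ring index 2, slot 5 encodes as 0x3005; parse_cookie() below
 * reverses the mapping. */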
1221
1222 /* Inspect a cookie and find out to which controller/slot it belongs. */
1223 static
1224 struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1225 {
1226 struct b43_dma *dma = &dev->dma;
1227 struct b43_dmaring *ring = NULL;
1228
1229 switch (cookie & 0xF000) {
1230 case 0x1000:
1231 ring = dma->tx_ring_AC_BK;
1232 break;
1233 case 0x2000:
1234 ring = dma->tx_ring_AC_BE;
1235 break;
1236 case 0x3000:
1237 ring = dma->tx_ring_AC_VI;
1238 break;
1239 case 0x4000:
1240 ring = dma->tx_ring_AC_VO;
1241 break;
1242 case 0x5000:
1243 ring = dma->tx_ring_mcast;
1244 break;
1245 }
1246 *slot = (cookie & 0x0FFF);
1247 if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
1248 b43dbg(dev->wl, "TX-status contains "
1249 "invalid cookie: 0x%04X\n", cookie);
1250 return NULL;
1251 }
1252
1253 return ring;
1254 }
1255
1256 static int dma_tx_fragment(struct b43_dmaring *ring,
1257 struct sk_buff *skb)
1258 {
1259 const struct b43_dma_ops *ops = ring->ops;
1260 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1261 struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
1262 u8 *header;
1263 int slot, old_top_slot, old_used_slots;
1264 int err;
1265 struct b43_dmadesc_generic *desc;
1266 struct b43_dmadesc_meta *meta;
1267 struct b43_dmadesc_meta *meta_hdr;
1268 u16 cookie;
1269 size_t hdrsize = b43_txhdr_size(ring->dev);
1270
1271 /* Important note: If the number of used DMA slots per TX frame
1272 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
1273 * the file has to be updated, too!
1274 */
1275
1276 old_top_slot = ring->current_slot;
1277 old_used_slots = ring->used_slots;
1278
1279 /* Get a slot for the header. */
1280 slot = request_slot(ring);
1281 desc = ops->idx2desc(ring, slot, &meta_hdr);
1282 memset(meta_hdr, 0, sizeof(*meta_hdr));
1283
1284 header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
1285 cookie = generate_cookie(ring, slot);
1286 err = b43_generate_txhdr(ring->dev, header,
1287 skb, info, cookie);
1288 if (unlikely(err)) {
1289 ring->current_slot = old_top_slot;
1290 ring->used_slots = old_used_slots;
1291 return err;
1292 }
1293
1294 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1295 hdrsize, 1);
1296 if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
1297 ring->current_slot = old_top_slot;
1298 ring->used_slots = old_used_slots;
1299 return -EIO;
1300 }
1301 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1302 hdrsize, 1, 0, 0);
1303
1304 /* Get a slot for the payload. */
1305 slot = request_slot(ring);
1306 desc = ops->idx2desc(ring, slot, &meta);
1307 memset(meta, 0, sizeof(*meta));
1308
1309 meta->skb = skb;
1310 meta->is_last_fragment = true;
1311 priv_info->bouncebuffer = NULL;
1312
1313 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1314 /* create a bounce buffer in zone_dma on mapping failure. */
1315 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1316 priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
1317 GFP_ATOMIC | GFP_DMA);
1318 if (!priv_info->bouncebuffer) {
1319 ring->current_slot = old_top_slot;
1320 ring->used_slots = old_used_slots;
1321 err = -ENOMEM;
1322 goto out_unmap_hdr;
1323 }
1324
1325 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
1326 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1327 kfree(priv_info->bouncebuffer);
1328 priv_info->bouncebuffer = NULL;
1329 ring->current_slot = old_top_slot;
1330 ring->used_slots = old_used_slots;
1331 err = -EIO;
1332 goto out_unmap_hdr;
1333 }
1334 }
1335
1336 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1337
1338 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1339 /* Tell the firmware about the cookie of the last
1340 * mcast frame, so it can clear the more-data bit in it. */
1341 b43_shm_write16(ring->dev, B43_SHM_SHARED,
1342 B43_SHM_SH_MCASTCOOKIE, cookie);
1343 }
1344 /* Now transfer the whole frame. */
1345 wmb();
1346 ops->poke_tx(ring, next_slot(ring, slot));
1347 return 0;
1348
1349 out_unmap_hdr:
1350 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1351 hdrsize, 1);
1352 return err;
1353 }
1354
1355 static inline int should_inject_overflow(struct b43_dmaring *ring)
1356 {
1357 #ifdef CONFIG_B43_DEBUG
1358 if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
1359 /* Check if we should inject another ringbuffer overflow
1360 * to test handling of this situation in the stack. */
1361 unsigned long next_overflow;
1362
1363 next_overflow = ring->last_injected_overflow + HZ;
1364 if (time_after(jiffies, next_overflow)) {
1365 ring->last_injected_overflow = jiffies;
1366 b43dbg(ring->dev->wl,
1367 "Injecting TX ring overflow on "
1368 "DMA controller %d\n", ring->index);
1369 return 1;
1370 }
1371 }
1372 #endif /* CONFIG_B43_DEBUG */
1373 return 0;
1374 }
1375
1376 /* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
1377 static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
1378 u8 queue_prio)
1379 {
1380 struct b43_dmaring *ring;
1381
1382 if (dev->qos_enabled) {
1383 /* 0 = highest priority */
1384 switch (queue_prio) {
1385 default:
1386 B43_WARN_ON(1);
1387 /* fallthrough */
1388 case 0:
1389 ring = dev->dma.tx_ring_AC_VO;
1390 break;
1391 case 1:
1392 ring = dev->dma.tx_ring_AC_VI;
1393 break;
1394 case 2:
1395 ring = dev->dma.tx_ring_AC_BE;
1396 break;
1397 case 3:
1398 ring = dev->dma.tx_ring_AC_BK;
1399 break;
1400 }
1401 } else
1402 ring = dev->dma.tx_ring_AC_BE;
1403
1404 return ring;
1405 }
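/* Note: this matches mac80211's queue numbering, where queue 0 is the
 * highest priority (voice) and queue 3 the lowest (background). */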
1406
1407 int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1408 {
1409 struct b43_dmaring *ring;
1410 struct ieee80211_hdr *hdr;
1411 int err = 0;
1412 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1413
1414 hdr = (struct ieee80211_hdr *)skb->data;
1415 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1416 /* Frames on the multicast ring are sent after the DTIM. */
1417 ring = dev->dma.tx_ring_mcast;
1418 /* Set the more-data bit. Ucode will clear it on
1419 * the last frame for us. */
1420 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1421 } else {
1422 /* Decide by priority where to put this frame. */
1423 ring = select_ring_by_priority(
1424 dev, skb_get_queue_mapping(skb));
1425 }
1426
1427 B43_WARN_ON(!ring->tx);
1428
1429 if (unlikely(ring->stopped)) {
1430 /* We get here only because of a bug in mac80211.
1431 * Because of a race, one packet may be queued after
1432 * the queue is stopped, so we get called when we shouldn't.
1433 * For now, just refuse the transmit. */
1434 if (b43_debug(dev, B43_DBG_DMAVERBOSE))
1435 b43err(dev->wl, "Packet after queue stopped\n");
1436 err = -ENOSPC;
1437 goto out;
1438 }
1439
1440 if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
1441 /* If we get here, we have a real error with the queue
1442 * full, but queues not stopped. */
1443 b43err(dev->wl, "DMA queue overflow\n");
1444 err = -ENOSPC;
1445 goto out;
1446 }
1447
1448 /* Assign the queue number to the ring (if not already done before)
1449 * so TX status handling can use it. The queue to ring mapping is
1450 * static, so we don't need to store it per frame. */
1451 ring->queue_prio = skb_get_queue_mapping(skb);
1452
1453 err = dma_tx_fragment(ring, skb);
1454 if (unlikely(err == -ENOKEY)) {
1455 /* Drop this packet, as we don't have the encryption key
1456 * anymore and must not transmit it unencrypted. */
1457 dev_kfree_skb_any(skb);
1458 err = 0;
1459 goto out;
1460 }
1461 if (unlikely(err)) {
1462 b43err(dev->wl, "DMA tx mapping failure\n");
1463 goto out;
1464 }
1465 if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
1466 should_inject_overflow(ring)) {
1467 /* This TX ring is full. */
1468 unsigned int skb_mapping = skb_get_queue_mapping(skb);
1469 ieee80211_stop_queue(dev->wl->hw, skb_mapping);
1470 dev->wl->tx_queue_stopped[skb_mapping] = 1;
1471 ring->stopped = true;
1472 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1473 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
1474 }
1475 }
1476 out:
1477
1478 return err;
1479 }
1480
1481 void b43_dma_handle_txstatus(struct b43_wldev *dev,
1482 const struct b43_txstatus *status)
1483 {
1484 const struct b43_dma_ops *ops;
1485 struct b43_dmaring *ring;
1486 struct b43_dmadesc_meta *meta;
1487 int slot, firstused;
1488 bool frame_succeed;
1489
1490 ring = parse_cookie(dev, status->cookie, &slot);
1491 if (unlikely(!ring))
1492 return;
1493 B43_WARN_ON(!ring->tx);
1494
1495 /* Sanity check: TX packets are processed in-order on one ring.
1496 * Check if the slot deduced from the cookie really is the first
1497 * used slot. */
1498 firstused = ring->current_slot - ring->used_slots + 1;
1499 if (firstused < 0)
1500 firstused = ring->nr_slots + firstused;
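	/* Example of the wraparound above: current_slot = 1 and
	 * used_slots = 4 on a 256-slot ring gives 1 - 4 + 1 = -2,
	 * i.e. firstused = 254. */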
1501 if (unlikely(slot != firstused)) {
1502 /* This is possibly a firmware bug and will result in
1503 * malfunction, memory leaks and/or a stall of the DMA functionality. */
1504 b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
1505 "Expected %d, but got %d\n",
1506 ring->index, firstused, slot);
1507 return;
1508 }
1509
1510 ops = ring->ops;
1511 while (1) {
1512 B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
1513 /* get meta - ignore returned value */
1514 ops->idx2desc(ring, slot, &meta);
1515
1516 if (b43_dma_ptr_is_poisoned(meta->skb)) {
1517 b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
1518 "on ring %d\n",
1519 slot, firstused, ring->index);
1520 break;
1521 }
1522 if (meta->skb) {
1523 struct b43_private_tx_info *priv_info =
1524 b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
1525
1526 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
1527 kfree(priv_info->bouncebuffer);
1528 priv_info->bouncebuffer = NULL;
1529 } else {
1530 unmap_descbuffer(ring, meta->dmaaddr,
1531 b43_txhdr_size(dev), 1);
1532 }
1533
1534 if (meta->is_last_fragment) {
1535 struct ieee80211_tx_info *info;
1536
1537 if (unlikely(!meta->skb)) {
1538 /* The last scatter-gather fragment of a frame must always
1539 * carry the skb, so the pointer must not be NULL here. */
1540 b43dbg(dev->wl, "TX status unexpected NULL skb "
1541 "at slot %d (first=%d) on ring %d\n",
1542 slot, firstused, ring->index);
1543 break;
1544 }
1545
1546 info = IEEE80211_SKB_CB(meta->skb);
1547
1548 /*
1549 * Call back to inform the ieee80211 subsystem about
1550 * the status of the transmission.
1551 */
1552 frame_succeed = b43_fill_txstatus_report(dev, info, status);
1553 #ifdef CONFIG_B43_DEBUG
1554 if (frame_succeed)
1555 ring->nr_succeed_tx_packets++;
1556 else
1557 ring->nr_failed_tx_packets++;
1558 ring->nr_total_packet_tries += status->frame_count;
1559 #endif /* DEBUG */
1560 ieee80211_tx_status(dev->wl->hw, meta->skb);
1561
1562 /* skb will be freed by ieee80211_tx_status().
1563 * Poison our pointer. */
1564 meta->skb = B43_DMA_PTR_POISON;
1565 } else {
1566 /* No need to call free_descriptor_buffer here, as
1567 * this is only the txhdr, which lives in the ring's txhdr_cache.
1568 */
1569 if (unlikely(meta->skb)) {
1570 b43dbg(dev->wl, "TX status unexpected non-NULL skb "
1571 "at slot %d (first=%d) on ring %d\n",
1572 slot, firstused, ring->index);
1573 break;
1574 }
1575 }
1576
1577 /* Everything is unmapped and freed, so the slot is not used anymore. */
1578 ring->used_slots--;
1579
1580 if (meta->is_last_fragment) {
1581 /* This is the last scatter-gather
1582 * fragment of the frame. We are done. */
1583 break;
1584 }
1585 slot = next_slot(ring, slot);
1586 }
1587 if (ring->stopped) {
1588 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
1589 ring->stopped = false;
1590 }
1591
1592 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1593 dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1594 } else {
1595 /* If the driver queue is running wake the corresponding
1596 * mac80211 queue. */
1597 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1598 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1599 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
1600 }
1601 }
1602 /* Add work to the queue. */
1603 ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
1604 }
1605
1606 static void dma_rx(struct b43_dmaring *ring, int *slot)
1607 {
1608 const struct b43_dma_ops *ops = ring->ops;
1609 struct b43_dmadesc_generic *desc;
1610 struct b43_dmadesc_meta *meta;
1611 struct b43_rxhdr_fw4 *rxhdr;
1612 struct sk_buff *skb;
1613 u16 len;
1614 int err;
1615 dma_addr_t dmaaddr;
1616
1617 desc = ops->idx2desc(ring, *slot, &meta);
1618
1619 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1620 skb = meta->skb;
1621
1622 rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
1623 len = le16_to_cpu(rxhdr->frame_len);
1624 if (len == 0) {
1625 int i = 0;
1626
1627 do {
1628 udelay(2);
1629 barrier();
1630 len = le16_to_cpu(rxhdr->frame_len);
1631 } while (len == 0 && i++ < 5);
1632 if (unlikely(len == 0)) {
1633 dmaaddr = meta->dmaaddr;
1634 goto drop_recycle_buffer;
1635 }
1636 }
1637 if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
1638 /* Something went wrong with the DMA.
1639 * The device did not touch the buffer and did not overwrite the poison. */
1640 b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
1641 dmaaddr = meta->dmaaddr;
1642 goto drop_recycle_buffer;
1643 }
1644 if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
1645 /* The data did not fit into one descriptor buffer
1646 * and is split over multiple buffers.
1647 * This should never happen, as we try to allocate buffers
1648 * big enough. So simply ignore this packet.
1649 */
1650 int cnt = 0;
1651 s32 tmp = len;
1652
1653 while (1) {
1654 desc = ops->idx2desc(ring, *slot, &meta);
1655 /* recycle the descriptor buffer. */
1656 b43_poison_rx_buffer(ring, meta->skb);
1657 sync_descbuffer_for_device(ring, meta->dmaaddr,
1658 ring->rx_buffersize);
1659 *slot = next_slot(ring, *slot);
1660 cnt++;
1661 tmp -= ring->rx_buffersize;
1662 if (tmp <= 0)
1663 break;
1664 }
1665 b43err(ring->dev->wl, "DMA RX buffer too small "
1666 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1667 len, ring->rx_buffersize, cnt);
1668 goto drop;
1669 }
1670
1671 dmaaddr = meta->dmaaddr;
1672 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1673 if (unlikely(err)) {
1674 b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
1675 goto drop_recycle_buffer;
1676 }
1677
1678 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1679 skb_put(skb, len + ring->frameoffset);
1680 skb_pull(skb, ring->frameoffset);
1681
1682 b43_rx(ring->dev, skb, rxhdr);
1683 drop:
1684 return;
1685
1686 drop_recycle_buffer:
1687 /* Poison and recycle the RX buffer. */
1688 b43_poison_rx_buffer(ring, skb);
1689 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1690 }
1691
1692 void b43_dma_rx(struct b43_dmaring *ring)
1693 {
1694 const struct b43_dma_ops *ops = ring->ops;
1695 int slot, current_slot;
1696 int used_slots = 0;
1697
1698 B43_WARN_ON(ring->tx);
1699 current_slot = ops->get_current_rxslot(ring);
1700 B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
1701
1702 slot = ring->current_slot;
1703 for (; slot != current_slot; slot = next_slot(ring, slot)) {
1704 dma_rx(ring, &slot);
1705 update_max_used_slots(ring, ++used_slots);
1706 }
1707 wmb();
1708 ops->set_current_rxslot(ring, slot);
1709 ring->current_slot = slot;
1710 }
1711
1712 static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
1713 {
1714 B43_WARN_ON(!ring->tx);
1715 ring->ops->tx_suspend(ring);
1716 }
1717
1718 static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
1719 {
1720 B43_WARN_ON(!ring->tx);
1721 ring->ops->tx_resume(ring);
1722 }
1723
1724 void b43_dma_tx_suspend(struct b43_wldev *dev)
1725 {
1726 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
1727 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
1728 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
1729 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
1730 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
1731 b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
1732 }
1733
1734 void b43_dma_tx_resume(struct b43_wldev *dev)
1735 {
1736 b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
1737 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
1738 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
1739 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
1740 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
1741 b43_power_saving_ctl_bits(dev, 0);
1742 }
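/* Note: the rings are resumed in the reverse of the suspend order above,
 * so the multicast ring is re-enabled first. */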
1743
1744 static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
1745 u16 mmio_base, bool enable)
1746 {
1747 u32 ctl;
1748
1749 if (type == B43_DMA_64BIT) {
1750 ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
1751 ctl &= ~B43_DMA64_RXDIRECTFIFO;
1752 if (enable)
1753 ctl |= B43_DMA64_RXDIRECTFIFO;
1754 b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
1755 } else {
1756 ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
1757 ctl &= ~B43_DMA32_RXDIRECTFIFO;
1758 if (enable)
1759 ctl |= B43_DMA32_RXDIRECTFIFO;
1760 b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
1761 }
1762 }
1763
1764 /* Enable/Disable Direct FIFO Receive Mode (PIO) on an RX engine.
1765 * This is called from PIO code, so DMA structures are not available. */
1766 void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
1767 unsigned int engine_index, bool enable)
1768 {
1769 enum b43_dmatype type;
1770 u16 mmio_base;
1771
1772 type = dma_mask_to_engine_type(supported_dma_mask(dev));
1773
1774 mmio_base = b43_dmacontroller_base(type, engine_index);
1775 direct_fifo_rx(dev, type, mmio_base, enable);
1776 }