/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>

#include <asm/addrspace.h>
/*
 * Each descriptor ring must be 8 kB aligned and fit within a contiguous
 * 8 kB region of physical address space.
 */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define D64MAXDD	(D64MAXRINGSZ / sizeof(dma64dd_t))
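
/*
 * Sizing example (illustrative, not driver code): a dma64dd_t descriptor
 * is 16 bytes (four u32 fields), so one maximally-sized 8 kB ring
 * (D64MAXRINGSZ == 1 << 13) holds D64MAXDD == 8192 / 16 == 512 descriptors.
 */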
/* transmit channel control */
#define D64_XC_XE		0x00000001	/* transmit enable */
#define D64_XC_SE		0x00000002	/* transmit suspend request */
#define D64_XC_LE		0x00000004	/* loopback enable */
#define D64_XC_FL		0x00000010	/* flush request */
#define D64_XC_PD		0x00000800	/* parity check disable */
#define D64_XC_AE		0x00030000	/* address extension bits */
#define D64_XC_AE_SHIFT		16
/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */
/* receive channel control */
#define D64_RC_RE		0x00000001	/* receive enable */
#define D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
#define D64_RC_RO_SHIFT		1
#define D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
#define D64_RC_SH		0x00000200	/* separate rx header descriptor enable */
#define D64_RC_OC		0x00000400	/* overflow continue */
#define D64_RC_PD		0x00000800	/* parity check disable */
#define D64_RC_AE		0x00030000	/* address extension bits */
#define D64_RC_AE_SHIFT		16
/* flags for dma controller */
#define DMA_CTRL_PEN		(1 << 0)	/* parity enable */
#define DMA_CTRL_ROC		(1 << 1)	/* rx overflow continue */
#define DMA_CTRL_RXMULTI	(1 << 2)	/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_UNFRAMED	(1 << 3)	/* Unframed Rx/Tx data */
/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */
#define D64_FA_OFF_MASK		0xffff	/* offset */
#define D64_FA_SEL_MASK		0xf0000	/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */
/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
#define D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count; real data length must be <= 16 KB */
#define D64_CTRL2_AE		0x00030000	/* address extension bits */
#define D64_CTRL2_AE_SHIFT	16
#define D64_CTRL2_PARITY	0x00040000	/* parity bit */

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK	0x0ff00000
#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
#define DMADDRWIDTH_30	30	/* 30-bit addressing capability */
#define DMADDRWIDTH_32	32	/* 32-bit addressing capability */
#define DMADDRWIDTH_63	63	/* 63-bit addressing capability */
#define DMADDRWIDTH_64	64	/* 64-bit addressing capability */
/* packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
 * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this value is at least as big
 * as TXOFF. This value is used in dma_rxfill (dma.c).
 */
#define BCMEXTRAHDROOM 172
#ifdef BCMDBG
#define DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printk args; \
	} while (0)
#define DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printk args; \
	} while (0)
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif				/* BCMDBG */

#define DMA_NONE(args)
typedef unsigned long dmaaddr_t;
#define PHYSADDRHI(_pa) (0)
#define PHYSADDRHISET(_pa, _val)
#define PHYSADDRLO(_pa) ((_pa))
#define PHYSADDRLOSET(_pa, _val) \
	do { \
		(_pa) = (_val); \
	} while (0)
#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;
#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	((dma_info_t *)dmah)

#define R_SM(r)		(*(r))
#define W_SM(r, v)	(*(r) = (v))
/* One physical DMA segment */
typedef struct {
	dma_addr_t addr;
	u32 length;
} dma_seg_t;

typedef struct {
	void *oshdmah;		/* Opaque handle for OSL to store its information */
	uint origsize;		/* Size of the virtual packet */
	uint nsegs;
	dma_seg_t segs[MAX_DMA_SEGS];
} dma_seg_map_t;
/*
 * Descriptors are only read by the hardware, never written back.
 */
typedef volatile struct {
	u32 ctrl1;	/* misc control bits & bufcount */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
} dma64dd_t;
/* dma engine software state */
typedef struct dma_info {
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *pbus;		/* bus handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	dma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	dma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle drivers may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* whether the descriptor base needs 4K alignment */
} dma_info_t;
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD((i) + 1)
#define PREVTXD(i)	TXD((i) - 1)
#define NEXTRXD(i)	RXD((i) + 1)
#define PREVRXD(i)	RXD((i) - 1)

#define NTXDACTIVE(h, t)	TXD((t) - (h))
#define NRXDACTIVE(h, t)	RXD((t) - (h))
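
/*
 * Worked example of the power-of-two wraparound (illustrative): with
 * di->ntxd == 8, NEXTTXD(7) == ((7 + 1) & 7) == 0, so the index wraps
 * back to the head of the ring, and NTXDACTIVE(6, 2) == ((2 - 6) & 7)
 * == 4 still counts the four in-flight descriptors across the wrap.
 * This masking trick is why ntxd and nrxd must be powers of 2.
 */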
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
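
/*
 * Example (illustrative): with sizeof(dma64dd_t) == 16, I2B(5, dma64dd_t)
 * == 80 converts ring index 5 into the byte offset written to the chip's
 * ptr register, and B2I(80, dma64dd_t) == 5 recovers the index from a
 * byte offset read out of status0/status1.
 */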
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
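
/*
 * Example of the 32-bit PCI address extension (illustrative): for
 * pa == 0xc1234568, ae == (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT
 * == 0x3 is programmed into the AE field, and the descriptor carries the
 * remaining low bits 0x01234568, letting a 32-bit engine reach the top
 * quarter of the 4 GB space.
 */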
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   void *dmaregstx, void *dmaregsrx, uint ntxd,
			   uint nrxd, uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printk(KERN_ERR "dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer */
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->dma.di_fn = (const di_fcn_t *)&dma64proc;
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->dma.di_fn->ctrlflags(&di->dma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				 0);

	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
		   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;
	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned, and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for a smaller dd table, hardware relaxes the alignment requirement */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n",
				   di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n",
				   di->name));
			goto fail;
		}
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(dma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(dma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct dma_pub *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
	/* fold the word onto itself until bit 0 holds the overall parity */
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}

#define DMA64_DD_PARITY(dd) \
	parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
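
/*
 * Example (illustrative): parity32(0xb) folds 1011b down one bit at a
 * time and returns 1 because three bits are set (odd parity). So
 * DMA64_DD_PARITY() is 1 exactly when the four descriptor words have odd
 * parity combined, and setting D64_CTRL2_PARITY in that case presumably
 * gives the stored descriptor even parity overall for the hardware check.
 */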
static void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	return dma64_alloc(di, direction);
}
void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
			   uint *alloced, unsigned long *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free tx packet DMA handles */
	kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree(di);
}
*di
)
715 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
716 if (di
->d64txregs
!= NULL
) {
717 W_REG(&di
->d64txregs
->addrlow
, 0xff0);
718 addrl
= R_REG(&di
->d64txregs
->addrlow
);
721 } else if (di
->d64rxregs
!= NULL
) {
722 W_REG(&di
->d64rxregs
->addrlow
, 0xff0);
723 addrl
= R_REG(&di
->d64rxregs
->addrlow
);
730 /* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
731 static bool _dma_isaddrext(dma_info_t
*di
)
733 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
735 /* not all tx or rx channel are available */
736 if (di
->d64txregs
!= NULL
) {
737 if (!_dma64_addrext(di
->d64txregs
)) {
738 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
739 "AE set\n", di
->name
));
742 } else if (di
->d64rxregs
!= NULL
) {
743 if (!_dma64_addrext(di
->d64rxregs
)) {
744 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
745 "AE set\n", di
->name
));
752 /* initialize descriptor table base address */
753 static void _dma_ddtable_init(dma_info_t
*di
, uint direction
, dmaaddr_t pa
)
755 if (!di
->aligndesc_4k
) {
756 if (direction
== DMA_TX
)
757 di
->xmtptrbase
= PHYSADDRLO(pa
);
759 di
->rcvptrbase
= PHYSADDRLO(pa
);
762 if ((di
->ddoffsetlow
== 0)
763 || !(PHYSADDRLO(pa
) & PCI32ADDR_HIGH
)) {
764 if (direction
== DMA_TX
) {
765 W_REG(&di
->d64txregs
->addrlow
,
766 (PHYSADDRLO(pa
) + di
->ddoffsetlow
));
767 W_REG(&di
->d64txregs
->addrhigh
,
768 (PHYSADDRHI(pa
) + di
->ddoffsethigh
));
770 W_REG(&di
->d64rxregs
->addrlow
,
771 (PHYSADDRLO(pa
) + di
->ddoffsetlow
));
772 W_REG(&di
->d64rxregs
->addrhigh
,
773 (PHYSADDRHI(pa
) + di
->ddoffsethigh
));
776 /* DMA64 32bits address extension */
779 /* shift the high bit(s) from pa to ae */
780 ae
= (PHYSADDRLO(pa
) & PCI32ADDR_HIGH
) >>
781 PCI32ADDR_HIGH_SHIFT
;
782 PHYSADDRLO(pa
) &= ~PCI32ADDR_HIGH
;
784 if (direction
== DMA_TX
) {
785 W_REG(&di
->d64txregs
->addrlow
,
786 (PHYSADDRLO(pa
) + di
->ddoffsetlow
));
787 W_REG(&di
->d64txregs
->addrhigh
,
789 SET_REG(&di
->d64txregs
->control
,
790 D64_XC_AE
, (ae
<< D64_XC_AE_SHIFT
));
792 W_REG(&di
->d64rxregs
->addrlow
,
793 (PHYSADDRLO(pa
) + di
->ddoffsetlow
));
794 W_REG(&di
->d64rxregs
->addrhigh
,
796 SET_REG(&di
->d64rxregs
->control
,
797 D64_RC_AE
, (ae
<< D64_RC_AE_SHIFT
));
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	OR_REG(&di->d64txregs->control, D64_XC_LE);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset((void *)di->rxd64, '\0',
	       (di->nrxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more.
 * If DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported;
 * otherwise, a frame spanning buffers is treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
 * After it reaches the max size of a buffer, the data continues in the next DMA descriptor
 * buffer WITHOUT a DMA header.
 */
static void *_dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = le16_to_cpu(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
	dma_spin_for_len(len, head);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			brcmu_pkt_buf_free_skb(head);
			di->dma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
/* post receive buffers
 * return false if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
 */
static bool _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
			       sizeof(dma_seg_map_t));

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, dma64dd_t));

	return ring_empty;
}
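
/*
 * Worked example of the lastdscr update above (illustrative): with an
 * unaligned-base engine where di->rcvptrbase == 0x1000 and rxout == 5,
 * _dma_rxfill() writes 0x1000 + I2B(5, dma64dd_t) == 0x1050, i.e. the
 * ptr register holds a byte offset from the addr register rather than a
 * bare descriptor index.
 */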
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	end =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}
1041 static void *_dma_peeknextrxp(dma_info_t
*di
)
1049 B2I(((R_REG(&di
->d64rxregs
->status0
) &
1050 D64_RS0_CD_MASK
) - di
->rcvptrbase
) & D64_RS0_CD_MASK
,
1053 for (i
= di
->rxin
; i
!= end
; i
= NEXTRXD(i
))
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}
static void *_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
static void _dma_txblock(dma_info_t *di)
{
	di->dma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}
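
/*
 * Note on the "- 1" (illustrative): txavail is capped at ntxd - 1 because
 * txin == txout must always mean "ring empty". If all ntxd descriptors
 * could be posted, a completely full ring would also have txin == txout
 * and the two states would be indistinguishable.
 */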
static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	curr =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	return NTXDACTIVE(curr, di->txout);
}
static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);

	return NTXDACTIVE(di->txin, ptr);
}
static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->dma.rxgiants = 0;
	di->dma.rxnobuf = 0;
	di->dma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags = di->dma.dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
		return 0;
	}

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control,
			      control);
		} else {
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	while (size >>= 1) {
		bitpos++;
	}
	return bitpos;
}
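
/*
 * Worked example (illustrative): dma_align_sizetobits(4096) shifts size
 * right 12 times before it reaches zero and returns 12; for a
 * non-power-of-2 input such as 3000 it returns floor(log2(3000)) == 11.
 * Callers pass ring sizes, which are powers of 2 by construction.
 */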
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
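
/*
 * Worked example (illustrative): with boundary == D64RINGALIGN (0x2000)
 * and size == 0x400, a first allocation whose aligned start is 0x1f00
 * spans 0x1f00..0x22ff and flips bit 13 midway, so the check above fails.
 * The retry asks for dma_align_sizetobits(0x400) == 10-bit alignment, and
 * a 1 kB-aligned 1 kB block can never straddle the 8 kB boundary.
 */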
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}
static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}
static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
static void dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   ((range ==
		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}
static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
	}

	return true;
}
static bool dma64_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool dma64_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
static bool dma64_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(&di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	u32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other dma TX functions
 * that take a pointer to a "packet".
 * Each call results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}
/* !! tx entry routine
 * WARNING: call must check the return value for error.
 *   the error(toss frames) could be fatal and cause many subsequent hard to debug problems
 */
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
			bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		dma_seg_map_t *map;

		data = p->data;
		len = p->len;
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			memset(&di->txp_dmah[txout], 0,
			       sizeof(dma_seg_map_t));

		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	brcmu_pkt_buf_free_skb(p0);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   ((range ==
		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (u16) (B2I
			   (((R_REG(&dregs->status0) &
			      D64_XS0_CD_MASK) -
			     di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		dma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
		  start, end, di->txout, forceall));
	return NULL;
}
static void *dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(dma64regs_t *dma64regs)
{
	u32 w;
	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
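
/*
 * Note (illustrative): _dma64_addrext() uses the same write-and-read-back
 * probing as the parity check in _dma_ctrlflags(): set D64_XC_AE, see
 * whether the bit sticks, then restore the register. On engines without
 * DmaExtendedAddrChanges the AE field reads back as zero.
 */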
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    ((((R_REG(&di->d64txregs->status1) &
			D64_XS1_AD_MASK)
		       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		di->txp[new] = di->txp[old];

		/* Move the map */
		if (DMASGLIST_ENAB) {
			memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
			       sizeof(dma_seg_map_t));
			memset(&di->txp_dmah[old], 0, sizeof(dma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
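
/*
 * Worked example (illustrative): suppose ntxd == 16, txin == 5,
 * txout == 8 (three active entries) and the hardware ActiveDescriptor
 * maps back to ad == 9. Then rot == TXD(9 - 5) == 4, entries 5..7 are
 * copied to 9..11 (EOT recomputed for the new slots), and txin/txout
 * become 9 and 12, realigning software state with where the engine will
 * resume.
 */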
uint dma_addrwidth(struct si_pub *sih, void *dmaregs)
{
	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (ai_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((sih->bustype == SI_BUS) ||
			    ((sih->bustype == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;
	}
	/* DMA hardware not supported by this driver */
	return DMADDRWIDTH_64;
}
/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	dma_info_t *di = (dma_info_t *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = (struct sk_buff *)di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = NEXTTXD(i);
	}
}