drivers/staging/brcm80211/brcmsmac/dma.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/delay.h>
19 #include <linux/pci.h>
20
21 #include <brcmu_utils.h>
22 #include <aiutils.h>
23 #include "types.h"
24 #include "dma.h"
25
26 #if defined(__mips__)
27 #include <asm/addrspace.h>
28 #endif
29
30 /*
31 * Each descriptor ring must be 8kB aligned and fit within a single contiguous 8kB region of physical address space.
32 */
33 #define D64RINGALIGN_BITS 13
34 #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
35 #define D64RINGALIGN (1 << D64RINGALIGN_BITS)
36
37 #define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
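/*
 * For illustration: each dma64dd_t descriptor is 16 bytes (4 x u32), so with
 * an 8 kB ring (D64MAXRINGSZ = 1 << 13 = 8192) the maximum descriptor count is
 * D64MAXDD = 8192 / 16 = 512 descriptors per ring.
 */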
38
39 /* transmit channel control */
40 #define D64_XC_XE 0x00000001 /* transmit enable */
41 #define D64_XC_SE 0x00000002 /* transmit suspend request */
42 #define D64_XC_LE 0x00000004 /* loopback enable */
43 #define D64_XC_FL 0x00000010 /* flush request */
44 #define D64_XC_PD 0x00000800 /* parity check disable */
45 #define D64_XC_AE 0x00030000 /* address extension bits */
46 #define D64_XC_AE_SHIFT 16
47
48 /* transmit descriptor table pointer */
49 #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
50
51 /* transmit channel status */
52 #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
53 #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
54 #define D64_XS0_XS_SHIFT 28
55 #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
56 #define D64_XS0_XS_ACTIVE 0x10000000 /* active */
57 #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
58 #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
59 #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
60
61 #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
62 #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
63 #define D64_XS1_XE_SHIFT 28
64 #define D64_XS1_XE_NOERR 0x00000000 /* no error */
65 #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
66 #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
67 #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
68 #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
69 #define D64_XS1_XE_COREE 0x50000000 /* core error */
70
71 /* receive channel control */
72 #define D64_RC_RE 0x00000001 /* receive enable */
73 #define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
74 #define D64_RC_RO_SHIFT 1
75 #define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
76 #define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
77 #define D64_RC_OC 0x00000400 /* overflow continue */
78 #define D64_RC_PD 0x00000800 /* parity check disable */
79 #define D64_RC_AE 0x00030000 /* address extension bits */
80 #define D64_RC_AE_SHIFT 16
81
82 /* flags for dma controller */
83 #define DMA_CTRL_PEN (1 << 0) /* parity enable */
84 #define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
85 #define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
86 #define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
87
88 /* receive descriptor table pointer */
89 #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
90
91 /* receive channel status */
92 #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
93 #define D64_RS0_RS_MASK 0xf0000000 /* receive state */
94 #define D64_RS0_RS_SHIFT 28
95 #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
96 #define D64_RS0_RS_ACTIVE 0x10000000 /* active */
97 #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
98 #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
99 #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
100
101 #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
102 #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
103 #define D64_RS1_RE_SHIFT 28
104 #define D64_RS1_RE_NOERR 0x00000000 /* no error */
105 #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
106 #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
107 #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
108 #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
109 #define D64_RS1_RE_COREE 0x50000000 /* core error */
110
111 /* fifoaddr */
112 #define D64_FA_OFF_MASK 0xffff /* offset */
113 #define D64_FA_SEL_MASK 0xf0000 /* select */
114 #define D64_FA_SEL_SHIFT 16
115 #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
116 #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
117 #define D64_FA_SEL_RDD 0x40000 /* receive dma data */
118 #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
119 #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
120 #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
121 #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
122 #define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
123 #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
124 #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
125
126 /* descriptor control flags 1 */
127 #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
128 #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
129 #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
130 #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
131 #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
132
133 /* descriptor control flags 2 */
134 #define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. real data len must <= 16KB */
135 #define D64_CTRL2_AE 0x00030000 /* address extension bits */
136 #define D64_CTRL2_AE_SHIFT 16
137 #define D64_CTRL2_PARITY 0x00040000 /* parity bit */
138
139 /* control flags in the range [27:20] are core-specific and not defined here */
140 #define D64_CTRL_CORE_MASK 0x0ff00000
141
142 #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
143 #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
144 #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
145 #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
146
147 #define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
148 #define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
149 #define DMADDRWIDTH_63 63 /* 63-bit addressing capability */
150 #define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
151
152 /* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
153 * By reserving it up front, we avoid the need to allocate an extra buffer for the header
154 * when bridging to WL. There is a compile-time check in wlc.c which ensures that this value
155 * is at least as big as TXOFF. This value is used in dma_rxfill (dma.c).
156 */
157
158 #define BCMEXTRAHDROOM 172
159
160 /* debug/trace */
161 #ifdef BCMDBG
162 #define DMA_ERROR(args) \
163 do { \
164 if (!(*di->msg_level & 1)) \
165 ; \
166 else \
167 printk args; \
168 } while (0)
169 #define DMA_TRACE(args) \
170 do { \
171 if (!(*di->msg_level & 2)) \
172 ; \
173 else \
174 printk args; \
175 } while (0)
176 #else
177 #define DMA_ERROR(args)
178 #define DMA_TRACE(args)
179 #endif /* BCMDBG */
180
181 #define DMA_NONE(args)
182
183 typedef unsigned long dmaaddr_t;
184 #define PHYSADDRHI(_pa) (0)
185 #define PHYSADDRHISET(_pa, _val)
186 #define PHYSADDRLO(_pa) ((_pa))
187 #define PHYSADDRLOSET(_pa, _val) \
188 do { \
189 (_pa) = (_val); \
190 } while (0)
191
192 #define d64txregs dregs.d64_u.txregs_64
193 #define d64rxregs dregs.d64_u.rxregs_64
194 #define txd64 dregs.d64_u.txd_64
195 #define rxd64 dregs.d64_u.rxd_64
196
197 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
198 static uint dma_msg_level;
199
200 #define MAXNAMEL 8 /* 8 char names */
201
202 #define DI_INFO(dmah) ((dma_info_t *)dmah)
203
204 #define R_SM(r) (*(r))
205 #define W_SM(r, v) (*(r) = (v))
206
207 /* One physical DMA segment */
208 typedef struct {
209 dmaaddr_t addr;
210 u32 length;
211 } dma_seg_t;
212
213 typedef struct {
214 void *oshdmah; /* Opaque handle for OSL to store its information */
215 uint origsize; /* Size of the virtual packet */
216 uint nsegs;
217 dma_seg_t segs[MAX_DMA_SEGS];
218 } dma_seg_map_t;
219
220 /*
221 * DMA Descriptor
222 * Descriptors are only read by the hardware, never written back.
223 */
224 typedef volatile struct {
225 u32 ctrl1; /* misc control bits & bufcount */
226 u32 ctrl2; /* buffer count and address extension */
227 u32 addrlow; /* memory address of the data buffer, bits 31:0 */
228 u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
229 } dma64dd_t;
230
231 /* dma engine software state */
232 typedef struct dma_info {
233 struct dma_pub dma; /* exported structure */
234 uint *msg_level; /* message level pointer */
235 char name[MAXNAMEL]; /* callers name for diag msgs */
236
237 void *pbus; /* bus handle */
238
239 bool dma64; /* this dma engine is operating in 64-bit mode */
240 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
241
242 union {
243 struct {
244 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
245 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
246 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
247 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
248 } d64_u;
249 } dregs;
250
251 u16 dmadesc_align; /* alignment requirement for dma descriptors */
252
253 u16 ntxd; /* # tx descriptors tunable */
254 u16 txin; /* index of next descriptor to reclaim */
255 u16 txout; /* index of next descriptor to post */
256 void **txp; /* pointer to parallel array of pointers to packets */
257 dma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
258 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
259 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
260 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
261 u32 txdalloc; /* #bytes allocated for the ring */
262 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
263 * is not just an index, it needs all 13 bits to be
264 * an offset from the addr register.
265 */
266
267 u16 nrxd; /* # rx descriptors tunable */
268 u16 rxin; /* index of next descriptor to reclaim */
269 u16 rxout; /* index of next descriptor to post */
270 void **rxp; /* pointer to parallel array of pointers to packets */
271 dma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
272 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
273 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
274 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
275 u32 rxdalloc; /* #bytes allocated for the ring */
276 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
277
278 /* tunables */
279 unsigned int rxbufsize; /* rx buffer size in bytes,
280 * not including the extra headroom
281 */
282 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper stack
283 * e.g. some rx pkt buffers will be bridged to tx side
284 * without byte copying. The extra headroom needs to be
285 * large enough to fit txheader needs.
286 * Some dongle drivers may not need it.
287 */
288 uint nrxpost; /* # rx buffers to keep posted */
289 unsigned int rxoffset; /* rxcontrol offset */
290 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
291 uint ddoffsethigh; /* high 32 bits */
292 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
293 uint dataoffsethigh; /* high 32 bits */
294 bool aligndesc_4k; /* whether the descriptor base needs to be aligned */
295 } dma_info_t;
296
297 /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
298 #ifdef BCMDMASGLISTOSL
299 #define DMASGLIST_ENAB true
300 #else
301 #define DMASGLIST_ENAB false
302 #endif /* BCMDMASGLISTOSL */
303
304 /* descriptor bumping macros */
305 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
306 #define TXD(x) XXD((x), di->ntxd)
307 #define RXD(x) XXD((x), di->nrxd)
308 #define NEXTTXD(i) TXD((i) + 1)
309 #define PREVTXD(i) TXD((i) - 1)
310 #define NEXTRXD(i) RXD((i) + 1)
311 #define PREVRXD(i) RXD((i) - 1)
312
313 #define NTXDACTIVE(h, t) TXD((t) - (h))
314 #define NRXDACTIVE(h, t) RXD((t) - (h))
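/*
 * Worked example (illustrative): the XXD() masking only works because ntxd and
 * nrxd are powers of 2, and it makes the head/tail arithmetic wrap correctly.
 * With ntxd = 64, txin = 60 and txout = 3:
 *   NTXDACTIVE(60, 3) = (3 - 60) & 63 = 7
 * i.e. seven descriptors are outstanding across the ring wrap.
 */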
315
316 /* macros to convert between byte offsets and indexes */
317 #define B2I(bytes, type) ((bytes) / sizeof(type))
318 #define I2B(index, type) ((index) * sizeof(type))
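/*
 * Example (illustrative): the hardware status registers report byte offsets
 * into the descriptor ring, so with sizeof(dma64dd_t) == 16:
 *   B2I(0x40, dma64dd_t) == 4   (byte offset 0x40 is descriptor index 4)
 *   I2B(4, dma64dd_t)    == 0x40
 */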
319
320 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
321 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
322
323 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
324 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
325
326 /* Common prototypes */
327 static bool _dma_isaddrext(dma_info_t *di);
328 static bool _dma_descriptor_align(dma_info_t *di);
329 static bool _dma_alloc(dma_info_t *di, uint direction);
330 static void _dma_detach(dma_info_t *di);
331 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
332 static void _dma_rxinit(dma_info_t *di);
333 static void *_dma_rx(dma_info_t *di);
334 static bool _dma_rxfill(dma_info_t *di);
335 static void _dma_rxreclaim(dma_info_t *di);
336 static void _dma_rxenable(dma_info_t *di);
337 static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
338 static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
339 u16 *rxbufsize);
340
341 static void _dma_txblock(dma_info_t *di);
342 static void _dma_txunblock(dma_info_t *di);
343 static uint _dma_txactive(dma_info_t *di);
344 static uint _dma_rxactive(dma_info_t *di);
345 static uint _dma_txpending(dma_info_t *di);
346 static uint _dma_txcommitted(dma_info_t *di);
347
348 static void *_dma_peeknexttxp(dma_info_t *di);
349 static void *_dma_peeknextrxp(dma_info_t *di);
350 static unsigned long _dma_getvar(dma_info_t *di, const char *name);
351 static void _dma_counterreset(dma_info_t *di);
352 static void _dma_fifoloopbackenable(dma_info_t *di);
353 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
354 static u8 dma_align_sizetobits(uint size);
355 static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
356 u16 *alignbits, uint *alloced,
357 dmaaddr_t *descpa);
358
359 /* Prototypes for 64-bit routines */
360 static bool dma64_alloc(dma_info_t *di, uint direction);
361 static bool dma64_txreset(dma_info_t *di);
362 static bool dma64_rxreset(dma_info_t *di);
363 static bool dma64_txsuspendedidle(dma_info_t *di);
364 static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
365 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
366 static void *dma64_getpos(dma_info_t *di, bool direction);
367 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
368 static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
369 static void dma64_txrotate(dma_info_t *di);
370
371 static bool dma64_rxidle(dma_info_t *di);
372 static void dma64_txinit(dma_info_t *di);
373 static bool dma64_txenabled(dma_info_t *di);
374 static void dma64_txsuspend(dma_info_t *di);
375 static void dma64_txresume(dma_info_t *di);
376 static bool dma64_txsuspended(dma_info_t *di);
377 static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
378 static bool dma64_txstopped(dma_info_t *di);
379 static bool dma64_rxstopped(dma_info_t *di);
380 static bool dma64_rxenabled(dma_info_t *di);
381 static bool _dma64_addrext(dma64regs_t *dma64regs);
382
383 static inline u32 parity32(u32 data);
384
385 const di_fcn_t dma64proc = {
386 (di_detach_t) _dma_detach,
387 (di_txinit_t) dma64_txinit,
388 (di_txreset_t) dma64_txreset,
389 (di_txenabled_t) dma64_txenabled,
390 (di_txsuspend_t) dma64_txsuspend,
391 (di_txresume_t) dma64_txresume,
392 (di_txsuspended_t) dma64_txsuspended,
393 (di_txsuspendedidle_t) dma64_txsuspendedidle,
394 (di_txfast_t) dma64_txfast,
395 (di_txunframed_t) dma64_txunframed,
396 (di_getpos_t) dma64_getpos,
397 (di_txstopped_t) dma64_txstopped,
398 (di_txreclaim_t) dma64_txreclaim,
399 (di_getnexttxp_t) dma64_getnexttxp,
400 (di_peeknexttxp_t) _dma_peeknexttxp,
401 (di_txblock_t) _dma_txblock,
402 (di_txunblock_t) _dma_txunblock,
403 (di_txactive_t) _dma_txactive,
404 (di_txrotate_t) dma64_txrotate,
405
406 (di_rxinit_t) _dma_rxinit,
407 (di_rxreset_t) dma64_rxreset,
408 (di_rxidle_t) dma64_rxidle,
409 (di_rxstopped_t) dma64_rxstopped,
410 (di_rxenable_t) _dma_rxenable,
411 (di_rxenabled_t) dma64_rxenabled,
412 (di_rx_t) _dma_rx,
413 (di_rxfill_t) _dma_rxfill,
414 (di_rxreclaim_t) _dma_rxreclaim,
415 (di_getnextrxp_t) _dma_getnextrxp,
416 (di_peeknextrxp_t) _dma_peeknextrxp,
417 (di_rxparam_get_t) _dma_rx_param_get,
418
419 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
420 (di_getvar_t) _dma_getvar,
421 (di_counterreset_t) _dma_counterreset,
422 (di_ctrlflags_t) _dma_ctrlflags,
423 NULL,
424 NULL,
425 NULL,
426 (di_rxactive_t) _dma_rxactive,
427 (di_txpending_t) _dma_txpending,
428 (di_txcommitted_t) _dma_txcommitted,
429 39
430 };
431
432 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
433 void *dmaregstx, void *dmaregsrx, uint ntxd,
434 uint nrxd, uint rxbufsize, int rxextheadroom,
435 uint nrxpost, uint rxoffset, uint *msg_level)
436 {
437 dma_info_t *di;
438 uint size;
439
440 /* allocate private info structure */
441 di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
442 if (di == NULL) {
443 #ifdef BCMDBG
444 printk(KERN_ERR "dma_attach: out of memory\n");
445 #endif
446 return NULL;
447 }
448
449 di->msg_level = msg_level ? msg_level : &dma_msg_level;
450
451
452 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
453
454 /* init dma reg pointer */
455 di->d64txregs = (dma64regs_t *) dmaregstx;
456 di->d64rxregs = (dma64regs_t *) dmaregsrx;
457 di->dma.di_fn = (const di_fcn_t *)&dma64proc;
458
459 /* Default flags (which can be changed by the driver calling dma_ctrlflags
460 * before enable): For backwards compatibility both Rx Overflow Continue
461 * and Parity are DISABLED, even if the hardware
462 * supports them.
463 */
464 di->dma.di_fn->ctrlflags(&di->dma, DMA_CTRL_ROC | DMA_CTRL_PEN,
465 0);
466
467 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
468 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
469 "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
470 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
471 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
472
473 /* make a private copy of our callers name */
474 strncpy(di->name, name, MAXNAMEL);
475 di->name[MAXNAMEL - 1] = '\0';
476
477 di->pbus = ((struct si_info *)sih)->pbus;
478
479 /* save tunables */
480 di->ntxd = (u16) ntxd;
481 di->nrxd = (u16) nrxd;
482
483 /* the actual dma size doesn't include the extra headroom */
484 di->rxextrahdrroom =
485 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
486 if (rxbufsize > BCMEXTRAHDROOM)
487 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
488 else
489 di->rxbufsize = (u16) rxbufsize;
490
491 di->nrxpost = (u16) nrxpost;
492 di->rxoffset = (u8) rxoffset;
493
494 /*
495 * figure out the DMA physical address offset for dd and data
496 * PCI/PCIE: they map the silicon backplane address to zero-based memory, need offset
497 * Other bus: use zero
498 * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
499 */
500 di->ddoffsetlow = 0;
501 di->dataoffsetlow = 0;
502 /* for pci bus, add offset */
503 if (sih->bustype == PCI_BUS) {
504 /* pcie with DMA64 */
505 di->ddoffsetlow = 0;
506 di->ddoffsethigh = SI_PCIE_DMA_H32;
507 di->dataoffsetlow = di->ddoffsetlow;
508 di->dataoffsethigh = di->ddoffsethigh;
509 }
510 #if defined(__mips__) && defined(IL_BIGENDIAN)
511 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
512 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
513 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
514 if ((ai_coreid(sih) == SDIOD_CORE_ID)
515 && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
516 di->addrext = 0;
517 else if ((ai_coreid(sih) == I2S_CORE_ID) &&
518 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
519 di->addrext = 0;
520 else
521 di->addrext = _dma_isaddrext(di);
522
523 /* do the descriptors need to be aligned, and if so, on a 4K/8K boundary or not */
524 di->aligndesc_4k = _dma_descriptor_align(di);
525 if (di->aligndesc_4k) {
526 di->dmadesc_align = D64RINGALIGN_BITS;
527 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
528 /* for a smaller descriptor table, HW relaxes the alignment requirement */
529 di->dmadesc_align = D64RINGALIGN_BITS - 1;
530 }
531 } else
532 di->dmadesc_align = 4; /* 16 byte alignment */
533
534 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
535 di->aligndesc_4k, di->dmadesc_align));
536
537 /* allocate tx packet pointer vector */
538 if (ntxd) {
539 size = ntxd * sizeof(void *);
540 di->txp = kzalloc(size, GFP_ATOMIC);
541 if (di->txp == NULL) {
542 DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
543 goto fail;
544 }
545 }
546
547 /* allocate rx packet pointer vector */
548 if (nrxd) {
549 size = nrxd * sizeof(void *);
550 di->rxp = kzalloc(size, GFP_ATOMIC);
551 if (di->rxp == NULL) {
552 DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
553 goto fail;
554 }
555 }
556
557 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
558 if (ntxd) {
559 if (!_dma_alloc(di, DMA_TX))
560 goto fail;
561 }
562
563 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
564 if (nrxd) {
565 if (!_dma_alloc(di, DMA_RX))
566 goto fail;
567 }
568
569 if ((di->ddoffsetlow != 0) && !di->addrext) {
570 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
571 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
572 goto fail;
573 }
574 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
575 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
576 goto fail;
577 }
578 }
579
580 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
581
582 /* allocate DMA mapping vectors */
583 if (DMASGLIST_ENAB) {
584 if (ntxd) {
585 size = ntxd * sizeof(dma_seg_map_t);
586 di->txp_dmah = kzalloc(size, GFP_ATOMIC);
587 if (di->txp_dmah == NULL)
588 goto fail;
589 }
590
591 if (nrxd) {
592 size = nrxd * sizeof(dma_seg_map_t);
593 di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
594 if (di->rxp_dmah == NULL)
595 goto fail;
596 }
597 }
598
599 return (struct dma_pub *) di;
600
601 fail:
602 _dma_detach(di);
603 return NULL;
604 }
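/*
 * Illustrative attach sketch (hypothetical values, not taken from this file):
 * a caller would typically do something like
 *
 *	di = dma_attach("wl0.0", sih, txregs, rxregs,
 *			NTXD, NRXD,		ring sizes, powers of 2
 *			RXBUFSZ,		rx buffer size incl. headroom
 *			-1,			-1: use default BCMEXTRAHDROOM
 *			NRXBUFPOST, HWRXOFF,	buffers to keep posted, rx offset
 *			&wl_msg_level);
 *	if (di == NULL)
 *		goto fail;
 *
 * NTXD, NRXD, RXBUFSZ, NRXBUFPOST, HWRXOFF and wl_msg_level are placeholders
 * for the caller's tunables; the real values live in the wlc code, not here.
 */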
605
606 /* Check for odd number of 1's */
607 static inline u32 parity32(u32 data)
608 {
609 data ^= data >> 16;
610 data ^= data >> 8;
611 data ^= data >> 4;
612 data ^= data >> 2;
613 data ^= data >> 1;
614
615 return data & 1;
616 }
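/*
 * Worked example (illustrative): parity32(0x00000007) returns 1 (three bits
 * set, odd parity), while parity32(0x00000003) returns 0 (two bits set, even
 * parity). The descriptor parity bit below is set when the folded parity is
 * odd, so the descriptor as a whole, including D64_CTRL2_PARITY itself, ends
 * up with even parity.
 */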
617
618 #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
619
620 static inline void
621 dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
622 u32 *flags, u32 bufcount)
623 {
624 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
625
626 /* PCI bus with big(>1G) physical address, use address extension */
627 #if defined(__mips__) && defined(IL_BIGENDIAN)
628 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
629 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
630 #else
631 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
632 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
633
634 W_SM(&ddring[outidx].addrlow,
635 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
636 W_SM(&ddring[outidx].addrhigh,
637 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
638 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
639 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
640 } else {
641 /* address extension for 32-bit PCI */
642 u32 ae;
643
644 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
645 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
646
647 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
648 W_SM(&ddring[outidx].addrlow,
649 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
650 W_SM(&ddring[outidx].addrhigh,
651 BUS_SWAP32(0 + di->dataoffsethigh));
652 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
653 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
654 }
655 if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
656 if (DMA64_DD_PARITY(&ddring[outidx])) {
657 W_SM(&ddring[outidx].ctrl2,
658 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
659 }
660 }
661 }
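/*
 * Address-extension example (illustrative): for a 32-bit PCI physical address
 * above 1 GB, say pa = 0xC0001000, the two top bits are moved into the AE
 * field of ctrl2:
 *   ae      = (0xC0001000 & PCI32ADDR_HIGH) >> 30 = 0x3
 *   addrlow = 0x00001000 (pa with bits 31:30 cleared, plus dataoffsetlow)
 *   ctrl2  |= (0x3 << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE
 * so the hardware reassembles the full address from addrlow/addrhigh plus AE.
 */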
662
663 static bool _dma_alloc(dma_info_t *di, uint direction)
664 {
665 return dma64_alloc(di, direction);
666 }
667
668 void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
669 uint *alloced, unsigned long *pap)
670 {
671 if (align_bits) {
672 u16 align = (1 << align_bits);
673 if (!IS_ALIGNED(PAGE_SIZE, align))
674 size += align;
675 *alloced = size;
676 }
677 return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
678 }
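/*
 * Sizing note (illustrative): when the requested alignment does not divide
 * PAGE_SIZE, the allocation is padded by one alignment unit so the caller can
 * round the returned address up and still have 'size' usable bytes. E.g. with
 * 4 kB pages and align_bits = 13 (8 kB alignment), an 8192-byte request is
 * grown to 16384 bytes before calling pci_alloc_consistent().
 */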
679
680 /* !! may be called with core in reset */
681 static void _dma_detach(dma_info_t *di)
682 {
683
684 DMA_TRACE(("%s: dma_detach\n", di->name));
685
686 /* free dma descriptor rings */
687 if (di->txd64)
688 pci_free_consistent(di->pbus, di->txdalloc,
689 ((s8 *)di->txd64 - di->txdalign),
690 (di->txdpaorig));
691 if (di->rxd64)
692 pci_free_consistent(di->pbus, di->rxdalloc,
693 ((s8 *)di->rxd64 - di->rxdalign),
694 (di->rxdpaorig));
695
696 /* free packet pointer vectors */
697 kfree(di->txp);
698 kfree(di->rxp);
699
700 /* free tx packet DMA handles */
701 kfree(di->txp_dmah);
702
703 /* free rx packet DMA handles */
704 kfree(di->rxp_dmah);
705
706 /* free our private info structure */
707 kfree(di);
708
709 }
710
711 static bool _dma_descriptor_align(dma_info_t *di)
712 {
713 u32 addrl;
714
715 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
716 if (di->d64txregs != NULL) {
717 W_REG(&di->d64txregs->addrlow, 0xff0);
718 addrl = R_REG(&di->d64txregs->addrlow);
719 if (addrl != 0)
720 return false;
721 } else if (di->d64rxregs != NULL) {
722 W_REG(&di->d64rxregs->addrlow, 0xff0);
723 addrl = R_REG(&di->d64rxregs->addrlow);
724 if (addrl != 0)
725 return false;
726 }
727 return true;
728 }
729
730 /* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
731 static bool _dma_isaddrext(dma_info_t *di)
732 {
733 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
734
735 /* not all tx or rx channels are available */
736 if (di->d64txregs != NULL) {
737 if (!_dma64_addrext(di->d64txregs)) {
738 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
739 "AE set\n", di->name));
740 }
741 return true;
742 } else if (di->d64rxregs != NULL) {
743 if (!_dma64_addrext(di->d64rxregs)) {
744 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
745 "AE set\n", di->name));
746 }
747 return true;
748 }
749 return false;
750 }
751
752 /* initialize descriptor table base address */
753 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
754 {
755 if (!di->aligndesc_4k) {
756 if (direction == DMA_TX)
757 di->xmtptrbase = PHYSADDRLO(pa);
758 else
759 di->rcvptrbase = PHYSADDRLO(pa);
760 }
761
762 if ((di->ddoffsetlow == 0)
763 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
764 if (direction == DMA_TX) {
765 W_REG(&di->d64txregs->addrlow,
766 (PHYSADDRLO(pa) + di->ddoffsetlow));
767 W_REG(&di->d64txregs->addrhigh,
768 (PHYSADDRHI(pa) + di->ddoffsethigh));
769 } else {
770 W_REG(&di->d64rxregs->addrlow,
771 (PHYSADDRLO(pa) + di->ddoffsetlow));
772 W_REG(&di->d64rxregs->addrhigh,
773 (PHYSADDRHI(pa) + di->ddoffsethigh));
774 }
775 } else {
776 /* DMA64 32bits address extension */
777 u32 ae;
778
779 /* shift the high bit(s) from pa to ae */
780 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
781 PCI32ADDR_HIGH_SHIFT;
782 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
783
784 if (direction == DMA_TX) {
785 W_REG(&di->d64txregs->addrlow,
786 (PHYSADDRLO(pa) + di->ddoffsetlow));
787 W_REG(&di->d64txregs->addrhigh,
788 di->ddoffsethigh);
789 SET_REG(&di->d64txregs->control,
790 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
791 } else {
792 W_REG(&di->d64rxregs->addrlow,
793 (PHYSADDRLO(pa) + di->ddoffsetlow));
794 W_REG(&di->d64rxregs->addrhigh,
795 di->ddoffsethigh);
796 SET_REG(&di->d64rxregs->control,
797 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
798 }
799 }
800 }
801
802 static void _dma_fifoloopbackenable(dma_info_t *di)
803 {
804 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
805
806 OR_REG(&di->d64txregs->control, D64_XC_LE);
807 }
808
809 static void _dma_rxinit(dma_info_t *di)
810 {
811 DMA_TRACE(("%s: dma_rxinit\n", di->name));
812
813 if (di->nrxd == 0)
814 return;
815
816 di->rxin = di->rxout = 0;
817
818 /* clear rx descriptor ring */
819 memset((void *)di->rxd64, '\0',
820 (di->nrxd * sizeof(dma64dd_t)));
821
822 /* DMA engine without alignment requirement requires table to be inited
823 * before enabling the engine
824 */
825 if (!di->aligndesc_4k)
826 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
827
828 _dma_rxenable(di);
829
830 if (di->aligndesc_4k)
831 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
832 }
833
834 static void _dma_rxenable(dma_info_t *di)
835 {
836 uint dmactrlflags = di->dma.dmactrlflags;
837 u32 control;
838
839 DMA_TRACE(("%s: dma_rxenable\n", di->name));
840
841 control =
842 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
843 D64_RC_RE;
844
845 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
846 control |= D64_RC_PD;
847
848 if (dmactrlflags & DMA_CTRL_ROC)
849 control |= D64_RC_OC;
850
851 W_REG(&di->d64rxregs->control,
852 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
853 }
854
855 static void
856 _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
857 {
858 /* the normal values fit into 16 bits */
859 *rxoffset = (u16) di->rxoffset;
860 *rxbufsize = (u16) di->rxbufsize;
861 }
862
863 /* !! rx entry routine
864 * Returns a pointer to the next frame received, or NULL if there are no more.
865 * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is supported
866 * via a chain of packets; otherwise a scattered frame is treated as a giant
867 * packet and is tossed.
868 * DMA scattering starts with the normal DMA header, followed by the first
869 * buffer's data. Once the maximum buffer size is reached, the data continues
870 * in the next DMA descriptor's buffer WITHOUT a DMA header.
871 */
872 static void *_dma_rx(dma_info_t *di)
873 {
874 struct sk_buff *p, *head, *tail;
875 uint len;
876 uint pkt_len;
877 int resid = 0;
878
879 next_frame:
880 head = _dma_getnextrxp(di, false);
881 if (head == NULL)
882 return NULL;
883
884 len = le16_to_cpu(*(u16 *) (head->data));
885 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
886 dma_spin_for_len(len, head);
887
888 /* set actual length */
889 pkt_len = min((di->rxoffset + len), di->rxbufsize);
890 __skb_trim(head, pkt_len);
891 resid = len - (di->rxbufsize - di->rxoffset);
892
893 /* check for single or multi-buffer rx */
894 if (resid > 0) {
895 tail = head;
896 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
897 tail->next = p;
898 pkt_len = min(resid, (int)di->rxbufsize);
899 __skb_trim(p, pkt_len);
900
901 tail = p;
902 resid -= di->rxbufsize;
903 }
904
905 #ifdef BCMDBG
906 if (resid > 0) {
907 uint cur;
908 cur =
909 B2I(((R_REG(&di->d64rxregs->status0) &
910 D64_RS0_CD_MASK) -
911 di->rcvptrbase) & D64_RS0_CD_MASK,
912 dma64dd_t);
913 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
914 di->rxin, di->rxout, cur));
915 }
916 #endif /* BCMDBG */
917
918 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
919 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
920 di->name, len));
921 brcmu_pkt_buf_free_skb(head);
922 di->dma.rxgiants++;
923 goto next_frame;
924 }
925 }
926
927 return head;
928 }
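/*
 * Multi-buffer example (illustrative): with rxbufsize = 2048 and rxoffset = 30,
 * a received frame reporting len = 5000 gives
 *   resid = 5000 - (2048 - 30) = 2982
 * so two more descriptors are consumed (2048 + 934 bytes) and chained onto
 * 'head'; if DMA_CTRL_RXMULTI is not set, the frame is dropped and counted in
 * rxgiants instead.
 */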
929
930 /* post receive buffers
931 * Returns true if the refill failed completely and the ring is empty;
932 * this will stall the rx dma and the caller may want to call rxfill again asap.
933 * This rarely happens on a memory-rich NIC, but often on a memory-constrained dongle.
934 */
935 static bool _dma_rxfill(dma_info_t *di)
936 {
937 struct sk_buff *p;
938 u16 rxin, rxout;
939 u32 flags = 0;
940 uint n;
941 uint i;
942 dmaaddr_t pa;
943 uint extra_offset = 0;
944 bool ring_empty;
945
946 ring_empty = false;
947
948 /*
949 * Determine how many receive buffers we're lacking
950 * from the full complement, allocate, initialize,
951 * and post them, then update the chip rx lastdscr.
952 */
953
954 rxin = di->rxin;
955 rxout = di->rxout;
956
957 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
958
959 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
960
961 if (di->rxbufsize > BCMEXTRAHDROOM)
962 extra_offset = di->rxextrahdrroom;
963
964 for (i = 0; i < n; i++) {
965 /* di->rxbufsize doesn't include the extra headroom; we need to add it to
966 * the size to be allocated
967 */
968
969 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
970
971 if (p == NULL) {
972 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
973 di->name));
974 if (i == 0 && dma64_rxidle(di)) {
975 DMA_ERROR(("%s: rxfill64: ring is empty !\n",
976 di->name));
977 ring_empty = true;
978 }
979 di->dma.rxnobuf++;
980 break;
981 }
982 /* reserve an extra headroom, if applicable */
983 if (extra_offset)
984 skb_pull(p, extra_offset);
985
986 /* Do a cached write instead of uncached write since DMA_MAP
987 * will flush the cache.
988 */
989 *(u32 *) (p->data) = 0;
990
991 if (DMASGLIST_ENAB)
992 memset(&di->rxp_dmah[rxout], 0,
993 sizeof(dma_seg_map_t));
994
995 pa = pci_map_single(di->pbus, p->data,
996 di->rxbufsize, PCI_DMA_FROMDEVICE);
997
998 /* save the free packet pointer */
999 di->rxp[rxout] = p;
1000
1001 /* reset flags for each descriptor */
1002 flags = 0;
1003 if (rxout == (di->nrxd - 1))
1004 flags = D64_CTRL1_EOT;
1005
1006 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1007 di->rxbufsize);
1008 rxout = NEXTRXD(rxout);
1009 }
1010
1011 di->rxout = rxout;
1012
1013 /* update the chip lastdscr pointer */
1014 W_REG(&di->d64rxregs->ptr,
1015 di->rcvptrbase + I2B(rxout, dma64dd_t));
1016
1017 return ring_empty;
1018 }
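/*
 * Example (illustrative): with nrxpost = 32, rxin = 10 and rxout = 20, ten
 * buffers are already posted, so this pass allocates and posts
 *   n = 32 - NRXDACTIVE(10, 20) = 32 - 10 = 22
 * fresh receive buffers before bumping the chip's lastdscr pointer.
 */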
1019
1020 /* like getnexttxp but no reclaim */
1021 static void *_dma_peeknexttxp(dma_info_t *di)
1022 {
1023 uint end, i;
1024
1025 if (di->ntxd == 0)
1026 return NULL;
1027
1028 end =
1029 B2I(((R_REG(&di->d64txregs->status0) &
1030 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1031 dma64dd_t);
1032
1033 for (i = di->txin; i != end; i = NEXTTXD(i))
1034 if (di->txp[i])
1035 return di->txp[i];
1036
1037 return NULL;
1038 }
1039
1040 /* like getnextrxp but does not take the packet off the ring */
1041 static void *_dma_peeknextrxp(dma_info_t *di)
1042 {
1043 uint end, i;
1044
1045 if (di->nrxd == 0)
1046 return NULL;
1047
1048 end =
1049 B2I(((R_REG(&di->d64rxregs->status0) &
1050 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
1051 dma64dd_t);
1052
1053 for (i = di->rxin; i != end; i = NEXTRXD(i))
1054 if (di->rxp[i])
1055 return di->rxp[i];
1056
1057 return NULL;
1058 }
1059
1060 static void _dma_rxreclaim(dma_info_t *di)
1061 {
1062 void *p;
1063
1064 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1065
1066 while ((p = _dma_getnextrxp(di, true)))
1067 brcmu_pkt_buf_free_skb(p);
1068 }
1069
1070 static void *_dma_getnextrxp(dma_info_t *di, bool forceall)
1071 {
1072 if (di->nrxd == 0)
1073 return NULL;
1074
1075 return dma64_getnextrxp(di, forceall);
1076 }
1077
1078 static void _dma_txblock(dma_info_t *di)
1079 {
1080 di->dma.txavail = 0;
1081 }
1082
1083 static void _dma_txunblock(dma_info_t *di)
1084 {
1085 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1086 }
1087
1088 static uint _dma_txactive(dma_info_t *di)
1089 {
1090 return NTXDACTIVE(di->txin, di->txout);
1091 }
1092
1093 static uint _dma_txpending(dma_info_t *di)
1094 {
1095 uint curr;
1096
1097 curr =
1098 B2I(((R_REG(&di->d64txregs->status0) &
1099 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1100 dma64dd_t);
1101
1102 return NTXDACTIVE(curr, di->txout);
1103 }
1104
1105 static uint _dma_txcommitted(dma_info_t *di)
1106 {
1107 uint ptr;
1108 uint txin = di->txin;
1109
1110 if (txin == di->txout)
1111 return 0;
1112
1113 ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);
1114
1115 return NTXDACTIVE(di->txin, ptr);
1116 }
1117
1118 static uint _dma_rxactive(dma_info_t *di)
1119 {
1120 return NRXDACTIVE(di->rxin, di->rxout);
1121 }
1122
1123 static void _dma_counterreset(dma_info_t *di)
1124 {
1125 /* reset all software counters */
1126 di->dma.rxgiants = 0;
1127 di->dma.rxnobuf = 0;
1128 di->dma.txnobuf = 0;
1129 }
1130
1131 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
1132 {
1133 uint dmactrlflags;
1134
1135 if (di == NULL) {
1136 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
1137 return 0;
1138 }
1139
1140 dmactrlflags = di->dma.dmactrlflags & ~mask;
1141 dmactrlflags |= flags;
1142
1143 /* If trying to enable parity, check if parity is actually supported */
1144 if (dmactrlflags & DMA_CTRL_PEN) {
1145 u32 control;
1146
1147 control = R_REG(&di->d64txregs->control);
1148 W_REG(&di->d64txregs->control,
1149 control | D64_XC_PD);
1150 if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
1151 /* We *can* disable it so it is supported,
1152 * restore control register
1153 */
1154 W_REG(&di->d64txregs->control,
1155 control);
1156 } else {
1157 /* Not supported, don't allow it to be enabled */
1158 dmactrlflags &= ~DMA_CTRL_PEN;
1159 }
1160 }
1161
1162 di->dma.dmactrlflags = dmactrlflags;
1163
1164 return dmactrlflags;
1165 }
1166
1167 /* get the address of the var in order to change later */
1168 static unsigned long _dma_getvar(dma_info_t *di, const char *name)
1169 {
1170 if (!strcmp(name, "&txavail"))
1171 return (unsigned long)&(di->dma.txavail);
1172 return 0;
1173 }
1174
1175 static
1176 u8 dma_align_sizetobits(uint size)
1177 {
1178 u8 bitpos = 0;
1179 while (size >>= 1) {
1180 bitpos++;
1181 }
1182 return bitpos;
1183 }
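/*
 * Example (illustrative): dma_align_sizetobits() is effectively floor(log2(size)):
 *   dma_align_sizetobits(8192) == 13
 *   dma_align_sizetobits(4096) == 12
 * dma_ringalloc() below uses this to retry the allocation with the ring size
 * itself as the alignment when the first attempt straddles the boundary.
 */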
1184
1185 /* This function ensures that the DMA descriptor ring does not cross a page
1186 * boundary. If the first allocation does cross the page boundary, it is
1187 * freed and the allocation is retried at a location aligned to the
1188 * descriptor ring size, which guarantees that the ring will not cross a
1189 * page boundary.
1190 */
1191 static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
1192 u16 *alignbits, uint *alloced,
1193 dmaaddr_t *descpa)
1194 {
1195 void *va;
1196 u32 desc_strtaddr;
1197 u32 alignbytes = 1 << *alignbits;
1198
1199 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
1200
1201 if (NULL == va)
1202 return NULL;
1203
1204 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
1205 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1206 & boundary)) {
1207 *alignbits = dma_align_sizetobits(size);
1208 pci_free_consistent(di->pbus, size, va, *descpa);
1209 va = dma_alloc_consistent(di->pbus, size, *alignbits,
1210 alloced, descpa);
1211 }
1212 return va;
1213 }
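/*
 * Retry example (illustrative): the callers pass boundary = D64RINGALIGN
 * (0x2000), so the check compares bit 13 of the first and last ring byte. If
 * e.g. the ring starts at offset 0x1F00 with size 0x400, the last byte is at
 * 0x22FF and the two sides of the comparison differ, so the buffer is freed
 * and re-allocated with *alignbits = dma_align_sizetobits(size), which keeps
 * the whole ring on one side of the boundary.
 */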
1214
1215 /* 64-bit DMA functions */
1216
1217 static void dma64_txinit(dma_info_t *di)
1218 {
1219 u32 control = D64_XC_XE;
1220
1221 DMA_TRACE(("%s: dma_txinit\n", di->name));
1222
1223 if (di->ntxd == 0)
1224 return;
1225
1226 di->txin = di->txout = 0;
1227 di->dma.txavail = di->ntxd - 1;
1228
1229 /* clear tx descriptor ring */
1230 memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
1231
1232 /* DMA engine without alignment requirement requires table to be inited
1233 * before enabling the engine
1234 */
1235 if (!di->aligndesc_4k)
1236 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1237
1238 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
1239 control |= D64_XC_PD;
1240 OR_REG(&di->d64txregs->control, control);
1241
1242 /* DMA engine with alignment requirement requires table to be inited
1243 * after enabling the engine
1244 */
1245 if (di->aligndesc_4k)
1246 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1247 }
1248
1249 static bool dma64_txenabled(dma_info_t *di)
1250 {
1251 u32 xc;
1252
1253 /* If the chip is dead, it is not enabled :-) */
1254 xc = R_REG(&di->d64txregs->control);
1255 return (xc != 0xffffffff) && (xc & D64_XC_XE);
1256 }
1257
1258 static void dma64_txsuspend(dma_info_t *di)
1259 {
1260 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1261
1262 if (di->ntxd == 0)
1263 return;
1264
1265 OR_REG(&di->d64txregs->control, D64_XC_SE);
1266 }
1267
1268 static void dma64_txresume(dma_info_t *di)
1269 {
1270 DMA_TRACE(("%s: dma_txresume\n", di->name));
1271
1272 if (di->ntxd == 0)
1273 return;
1274
1275 AND_REG(&di->d64txregs->control, ~D64_XC_SE);
1276 }
1277
1278 static bool dma64_txsuspended(dma_info_t *di)
1279 {
1280 return (di->ntxd == 0) ||
1281 ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
1282 D64_XC_SE);
1283 }
1284
1285 static void dma64_txreclaim(dma_info_t *di, txd_range_t range)
1286 {
1287 void *p;
1288
1289 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1290 (range == DMA_RANGE_ALL) ? "all" :
1291 ((range ==
1292 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1293 "transferred")));
1294
1295 if (di->txin == di->txout)
1296 return;
1297
1298 while ((p = dma64_getnexttxp(di, range))) {
1299 /* For unframed data, we don't have any packets to free */
1300 if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
1301 brcmu_pkt_buf_free_skb(p);
1302 }
1303 }
1304
1305 static bool dma64_txstopped(dma_info_t *di)
1306 {
1307 return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1308 D64_XS0_XS_STOPPED);
1309 }
1310
1311 static bool dma64_rxstopped(dma_info_t *di)
1312 {
1313 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
1314 D64_RS0_RS_STOPPED);
1315 }
1316
1317 static bool dma64_alloc(dma_info_t *di, uint direction)
1318 {
1319 u16 size;
1320 uint ddlen;
1321 void *va;
1322 uint alloced = 0;
1323 u16 align;
1324 u16 align_bits;
1325
1326 ddlen = sizeof(dma64dd_t);
1327
1328 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1329 align_bits = di->dmadesc_align;
1330 align = (1 << align_bits);
1331
1332 if (direction == DMA_TX) {
1333 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1334 &alloced, &di->txdpaorig);
1335 if (va == NULL) {
1336 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1337 return false;
1338 }
1339 align = (1 << align_bits);
1340 di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1341 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
1342 PHYSADDRLOSET(di->txdpa,
1343 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1344 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
1345 di->txdalloc = alloced;
1346 } else {
1347 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1348 &alloced, &di->rxdpaorig);
1349 if (va == NULL) {
1350 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1351 return false;
1352 }
1353 align = (1 << align_bits);
1354 di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1355 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
1356 PHYSADDRLOSET(di->rxdpa,
1357 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1358 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
1359 di->rxdalloc = alloced;
1360 }
1361
1362 return true;
1363 }
1364
1365 static bool dma64_txreset(dma_info_t *di)
1366 {
1367 u32 status;
1368
1369 if (di->ntxd == 0)
1370 return true;
1371
1372 /* suspend tx DMA first */
1373 W_REG(&di->d64txregs->control, D64_XC_SE);
1374 SPINWAIT(((status =
1375 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1376 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1377 && (status != D64_XS0_XS_STOPPED), 10000);
1378
1379 W_REG(&di->d64txregs->control, 0);
1380 SPINWAIT(((status =
1381 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1382 != D64_XS0_XS_DISABLED), 10000);
1383
1384 /* wait for the last transaction to complete */
1385 udelay(300);
1386
1387 return status == D64_XS0_XS_DISABLED;
1388 }
1389
1390 static bool dma64_rxidle(dma_info_t *di)
1391 {
1392 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1393
1394 if (di->nrxd == 0)
1395 return true;
1396
1397 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
1398 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
1399 }
1400
1401 static bool dma64_rxreset(dma_info_t *di)
1402 {
1403 u32 status;
1404
1405 if (di->nrxd == 0)
1406 return true;
1407
1408 W_REG(&di->d64rxregs->control, 0);
1409 SPINWAIT(((status =
1410 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
1411 != D64_RS0_RS_DISABLED), 10000);
1412
1413 return status == D64_RS0_RS_DISABLED;
1414 }
1415
1416 static bool dma64_rxenabled(dma_info_t *di)
1417 {
1418 u32 rc;
1419
1420 rc = R_REG(&di->d64rxregs->control);
1421 return (rc != 0xffffffff) && (rc & D64_RC_RE);
1422 }
1423
1424 static bool dma64_txsuspendedidle(dma_info_t *di)
1425 {
1426
1427 if (di->ntxd == 0)
1428 return true;
1429
1430 if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
1431 return 0;
1432
1433 if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1434 D64_XS0_XS_IDLE)
1435 return 1;
1436
1437 return 0;
1438 }
1439
1440 /* Useful when sending unframed data. This allows us to get a progress report from the DMA.
1441 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
1442 * If DMA is idle, we return NULL.
1443 */
1444 static void *dma64_getpos(dma_info_t *di, bool direction)
1445 {
1446 void *va;
1447 bool idle;
1448 u32 cd_offset;
1449
1450 if (direction == DMA_TX) {
1451 cd_offset =
1452 R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
1453 idle = !NTXDACTIVE(di->txin, di->txout);
1454 va = di->txp[B2I(cd_offset, dma64dd_t)];
1455 } else {
1456 cd_offset =
1457 R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
1458 idle = !NRXDACTIVE(di->rxin, di->rxout);
1459 va = di->rxp[B2I(cd_offset, dma64dd_t)];
1460 }
1461
1462 /* If DMA is IDLE, return NULL */
1463 if (idle) {
1464 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
1465 va = NULL;
1466 }
1467
1468 return va;
1469 }
1470
1471 /* TX of unframed data
1472 *
1473 * Adds a DMA ring descriptor for the data pointed to by "buf".
1474 * This is for DMA of a buffer of data and is unlike other dma TX functions
1475 * that take a pointer to a "packet".
1476 * Each call results in a single descriptor being added for "len" bytes of
1477 * data starting at "buf"; it doesn't handle chained buffers.
1478 */
1479 static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
1480 {
1481 u16 txout;
1482 u32 flags = 0;
1483 dmaaddr_t pa; /* phys addr */
1484
1485 txout = di->txout;
1486
1487 /* return nonzero if out of tx descriptors */
1488 if (NEXTTXD(txout) == di->txin)
1489 goto outoftxd;
1490
1491 if (len == 0)
1492 return 0;
1493
1494 pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);
1495
1496 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
1497
1498 if (txout == (di->ntxd - 1))
1499 flags |= D64_CTRL1_EOT;
1500
1501 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1502
1503 /* save the buffer pointer - used by dma_getpos */
1504 di->txp[txout] = buf;
1505
1506 txout = NEXTTXD(txout);
1507 /* bump the tx descriptor index */
1508 di->txout = txout;
1509
1510 /* kick the chip */
1511 if (commit) {
1512 W_REG(&di->d64txregs->ptr,
1513 di->xmtptrbase + I2B(txout, dma64dd_t));
1514 }
1515
1516 /* tx flow control */
1517 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1518
1519 return 0;
1520
1521 outoftxd:
1522 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
1523 di->dma.txavail = 0;
1524 di->dma.txnobuf++;
1525 return -1;
1526 }
1527
1528 /* !! tx entry routine
1529 * WARNING: the caller must check the return value for errors.
1530 * An error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems.
1531 */
1532 static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
1533 bool commit)
1534 {
1535 struct sk_buff *p, *next;
1536 unsigned char *data;
1537 uint len;
1538 u16 txout;
1539 u32 flags = 0;
1540 dmaaddr_t pa;
1541
1542 DMA_TRACE(("%s: dma_txfast\n", di->name));
1543
1544 txout = di->txout;
1545
1546 /*
1547 * Walk the chain of packet buffers
1548 * allocating and initializing transmit descriptor entries.
1549 */
1550 for (p = p0; p; p = next) {
1551 uint nsegs, j;
1552 dma_seg_map_t *map;
1553
1554 data = p->data;
1555 len = p->len;
1556 next = p->next;
1557
1558 /* return nonzero if out of tx descriptors */
1559 if (NEXTTXD(txout) == di->txin)
1560 goto outoftxd;
1561
1562 if (len == 0)
1563 continue;
1564
1565 /* get physical address of buffer start */
1566 if (DMASGLIST_ENAB)
1567 memset(&di->txp_dmah[txout], 0,
1568 sizeof(dma_seg_map_t));
1569
1570 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
1571
1572 if (DMASGLIST_ENAB) {
1573 map = &di->txp_dmah[txout];
1574
1575 /* See if all the segments can be accounted for */
1576 if (map->nsegs >
1577 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1578 1))
1579 goto outoftxd;
1580
1581 nsegs = map->nsegs;
1582 } else
1583 nsegs = 1;
1584
1585 for (j = 1; j <= nsegs; j++) {
1586 flags = 0;
1587 if (p == p0 && j == 1)
1588 flags |= D64_CTRL1_SOF;
1589
1590 /* With a DMA segment list, Descriptor table is filled
1591 * using the segment list instead of looping over
1592 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1593 * end of segment list is reached.
1594 */
1595 if ((!DMASGLIST_ENAB && next == NULL) ||
1596 (DMASGLIST_ENAB && j == nsegs))
1597 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1598 if (txout == (di->ntxd - 1))
1599 flags |= D64_CTRL1_EOT;
1600
1601 if (DMASGLIST_ENAB) {
1602 len = map->segs[j - 1].length;
1603 pa = map->segs[j - 1].addr;
1604 }
1605 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1606
1607 txout = NEXTTXD(txout);
1608 }
1609
1610 /* See above. No need to loop over individual buffers */
1611 if (DMASGLIST_ENAB)
1612 break;
1613 }
1614
1615 /* if last txd eof not set, fix it */
1616 if (!(flags & D64_CTRL1_EOF))
1617 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
1618 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
1619
1620 /* save the packet */
1621 di->txp[PREVTXD(txout)] = p0;
1622
1623 /* bump the tx descriptor index */
1624 di->txout = txout;
1625
1626 /* kick the chip */
1627 if (commit)
1628 W_REG(&di->d64txregs->ptr,
1629 di->xmtptrbase + I2B(txout, dma64dd_t));
1630
1631 /* tx flow control */
1632 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1633
1634 return 0;
1635
1636 outoftxd:
1637 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
1638 brcmu_pkt_buf_free_skb(p0);
1639 di->dma.txavail = 0;
1640 di->dma.txnobuf++;
1641 return -1;
1642 }
1643
1644 /*
1645 * Reclaim next completed txd (txds if using chained buffers) in the range
1646 * specified and return associated packet.
1647 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1648 * transmitted as noted by the hardware "CurrDescr" pointer.
1649 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
1650 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1651 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1652 * return associated packet regardless of the value of hardware pointers.
1653 */
1654 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
1655 {
1656 u16 start, end, i;
1657 u16 active_desc;
1658 void *txp;
1659
1660 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1661 (range == DMA_RANGE_ALL) ? "all" :
1662 ((range ==
1663 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1664 "transferred")));
1665
1666 if (di->ntxd == 0)
1667 return NULL;
1668
1669 txp = NULL;
1670
1671 start = di->txin;
1672 if (range == DMA_RANGE_ALL)
1673 end = di->txout;
1674 else {
1675 dma64regs_t *dregs = di->d64txregs;
1676
1677 end =
1678 (u16) (B2I
1679 (((R_REG(&dregs->status0) &
1680 D64_XS0_CD_MASK) -
1681 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
1682
1683 if (range == DMA_RANGE_TRANSFERED) {
1684 active_desc =
1685 (u16) (R_REG(&dregs->status1) &
1686 D64_XS1_AD_MASK);
1687 active_desc =
1688 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
1689 active_desc = B2I(active_desc, dma64dd_t);
1690 if (end != active_desc)
1691 end = PREVTXD(active_desc);
1692 }
1693 }
1694
1695 if ((start == 0) && (end > di->txout))
1696 goto bogus;
1697
1698 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1699 dmaaddr_t pa;
1700 dma_seg_map_t *map = NULL;
1701 uint size, j, nsegs;
1702
1703 PHYSADDRLOSET(pa,
1704 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
1705 di->dataoffsetlow));
1706 PHYSADDRHISET(pa,
1707 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
1708 di->dataoffsethigh));
1709
1710 if (DMASGLIST_ENAB) {
1711 map = &di->txp_dmah[i];
1712 size = map->origsize;
1713 nsegs = map->nsegs;
1714 } else {
1715 size =
1716 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
1717 D64_CTRL2_BC_MASK);
1718 nsegs = 1;
1719 }
1720
1721 for (j = nsegs; j > 0; j--) {
1722 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
1723 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
1724
1725 txp = di->txp[i];
1726 di->txp[i] = NULL;
1727 if (j > 1)
1728 i = NEXTTXD(i);
1729 }
1730
1731 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
1732 }
1733
1734 di->txin = i;
1735
1736 /* tx flow control */
1737 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1738
1739 return txp;
1740
1741 bogus:
1742 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
1743 return NULL;
1744 }
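/*
 * Range example (illustrative): suppose txin = 2, txout = 7, the hardware
 * CurrDescr (CD) maps to descriptor 4 and ActiveDescr (AD) to descriptor 6:
 *   DMA_RANGE_TRANSMITTED reclaims 2..3 (end = 4, from CD),
 *   DMA_RANGE_TRANSFERED  reclaims 2..4 (end = PREVTXD(6) = 5),
 *   DMA_RANGE_ALL         reclaims 2..6 (end = txout = 7),
 * in each case regardless of which packet pointers are still set.
 */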
1745
1746 static void *dma64_getnextrxp(dma_info_t *di, bool forceall)
1747 {
1748 uint i, curr;
1749 void *rxp;
1750 dmaaddr_t pa;
1751
1752 i = di->rxin;
1753
1754 /* return if no packets posted */
1755 if (i == di->rxout)
1756 return NULL;
1757
1758 curr =
1759 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
1760 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
1761
1762 /* ignore curr if forceall */
1763 if (!forceall && (i == curr))
1764 return NULL;
1765
1766 /* get the packet pointer that corresponds to the rx descriptor */
1767 rxp = di->rxp[i];
1768 di->rxp[i] = NULL;
1769
1770 PHYSADDRLOSET(pa,
1771 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
1772 di->dataoffsetlow));
1773 PHYSADDRHISET(pa,
1774 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
1775 di->dataoffsethigh));
1776
1777 /* clear this packet from the descriptor ring */
1778 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
1779
1780 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
1781 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
1782
1783 di->rxin = NEXTRXD(i);
1784
1785 return rxp;
1786 }
1787
1788 static bool _dma64_addrext(dma64regs_t *dma64regs)
1789 {
1790 u32 w;
1791 OR_REG(&dma64regs->control, D64_XC_AE);
1792 w = R_REG(&dma64regs->control);
1793 AND_REG(&dma64regs->control, ~D64_XC_AE);
1794 return (w & D64_XC_AE) == D64_XC_AE;
1795 }
1796
1797 /*
1798 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1799 */
1800 static void dma64_txrotate(dma_info_t *di)
1801 {
1802 u16 ad;
1803 uint nactive;
1804 uint rot;
1805 u16 old, new;
1806 u32 w;
1807 u16 first, last;
1808
1809 nactive = _dma_txactive(di);
1810 ad = (u16) (B2I
1811 ((((R_REG(&di->d64txregs->status1) &
1812 D64_XS1_AD_MASK)
1813 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
1814 rot = TXD(ad - di->txin);
1815
1816 /* full-ring case is a lot harder - don't worry about this */
1817 if (rot >= (di->ntxd - nactive)) {
1818 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1819 return;
1820 }
1821
1822 first = di->txin;
1823 last = PREVTXD(di->txout);
1824
1825 /* move entries starting at last and moving backwards to first */
1826 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1827 new = TXD(old + rot);
1828
1829 /*
1830 * Move the tx dma descriptor.
1831 * EOT is set only in the last entry in the ring.
1832 */
1833 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
1834 if (new == (di->ntxd - 1))
1835 w |= D64_CTRL1_EOT;
1836 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
1837
1838 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
1839 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
1840
1841 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
1842 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
1843
1844 /* zap the old tx dma descriptor address field */
1845 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
1846 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
1847
1848 /* move the corresponding txp[] entry */
1849 di->txp[new] = di->txp[old];
1850
1851 /* Move the map */
1852 if (DMASGLIST_ENAB) {
1853 memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
1854 sizeof(dma_seg_map_t));
1855 memset(&di->txp_dmah[old], 0, sizeof(dma_seg_map_t));
1856 }
1857
1858 di->txp[old] = NULL;
1859 }
1860
1861 /* update txin and txout */
1862 di->txin = ad;
1863 di->txout = TXD(di->txout + rot);
1864 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1865
1866 /* kick the chip */
1867 W_REG(&di->d64txregs->ptr,
1868 di->xmtptrbase + I2B(di->txout, dma64dd_t));
1869 }
1870
1871 uint dma_addrwidth(struct si_pub *sih, void *dmaregs)
1872 {
1873 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
1874 /* DMA engine is 64-bit capable */
1875 if ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
1876 /* backplane is 64-bit capable */
1877 if (ai_backplane64(sih))
1878 /* If bus is System Backplane or PCIE then we can access 64-bits */
1879 if ((sih->bustype == SI_BUS) ||
1880 ((sih->bustype == PCI_BUS) &&
1881 (sih->buscoretype == PCIE_CORE_ID)))
1882 return DMADDRWIDTH_64;
1883 }
1884 /* DMA hardware not supported by this driver */
1885 return DMADDRWIDTH_64;
1886 }
1887
1888 /*
1889 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
1890 * modified. The modified portion of the packet is not under control of the DMA
1891 * engine. This function calls a caller-supplied function for each packet in
1892 * the caller specified dma chain.
1893 */
1894 void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
1895 (void *pkt, void *arg_a), void *arg_a)
1896 {
1897 dma_info_t *di = (dma_info_t *) dmah;
1898 uint i = di->txin;
1899 uint end = di->txout;
1900 struct sk_buff *skb;
1901 struct ieee80211_tx_info *tx_info;
1902
1903 while (i != end) {
1904 skb = (struct sk_buff *)di->txp[i];
1905 if (skb != NULL) {
1906 tx_info = (struct ieee80211_tx_info *)skb->cb;
1907 (callback_fnc)(tx_info, arg_a);
1908 }
1909 i = NEXTTXD(i);
1910 }
1911 }