1 | /* |
2 | * Copyright (c) 2010 Broadcom Corporation | |
3 | * | |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
11 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION | |
13 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | |
14 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | #include <linux/slab.h> | |
17 | #include <linux/delay.h> |
18 | #include <linux/pci.h> | |
19 | ||
20 | #include <brcmu_utils.h> | |
21 | #include <aiutils.h> | |
22 | #include "types.h" | |
23 | #include "dma.h" | |
24 | #include "soc.h"
25 | |
26 | /* | |
27 | * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within | |
28 | * a contiguous 8 kB physical address range.
29 | */ | |
30 | #define D64RINGALIGN_BITS 13 | |
31 | #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) | |
32 | #define D64RINGALIGN (1 << D64RINGALIGN_BITS) | |
33 | ||
34 | #define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc)) | |
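/* Note: struct dma64desc (defined below) is four __le32 fields, i.e. 16 bytes, so D64MAXDD works out to 8192 / 16 = 512 descriptors per ring. */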
35 | ||
36 | /* transmit channel control */ | |
37 | #define D64_XC_XE 0x00000001 /* transmit enable */ | |
38 | #define D64_XC_SE 0x00000002 /* transmit suspend request */ | |
39 | #define D64_XC_LE 0x00000004 /* loopback enable */ | |
40 | #define D64_XC_FL 0x00000010 /* flush request */ | |
41 | #define D64_XC_PD 0x00000800 /* parity check disable */ | |
42 | #define D64_XC_AE 0x00030000 /* address extension bits */ | |
43 | #define D64_XC_AE_SHIFT 16 | |
44 | ||
45 | /* transmit descriptor table pointer */ | |
46 | #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */ | |
47 | ||
48 | /* transmit channel status */ | |
49 | #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */ | |
50 | #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */ | |
51 | #define D64_XS0_XS_SHIFT 28 | |
52 | #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */ | |
53 | #define D64_XS0_XS_ACTIVE 0x10000000 /* active */ | |
54 | #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */ | |
55 | #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */ | |
56 | #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */ | |
57 | ||
58 | #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */ | |
59 | #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */ | |
60 | #define D64_XS1_XE_SHIFT 28 | |
61 | #define D64_XS1_XE_NOERR 0x00000000 /* no error */ | |
62 | #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */ | |
63 | #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */ | |
64 | #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */ | |
65 | #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */ | |
66 | #define D64_XS1_XE_COREE 0x50000000 /* core error */ | |
67 | ||
68 | /* receive channel control */ | |
69 | /* receive enable */ | |
70 | #define D64_RC_RE 0x00000001 | |
71 | /* receive frame offset */ | |
72 | #define D64_RC_RO_MASK 0x000000fe | |
73 | #define D64_RC_RO_SHIFT 1 | |
74 | /* direct fifo receive (pio) mode */ | |
75 | #define D64_RC_FM 0x00000100 | |
76 | /* separate rx header descriptor enable */ | |
77 | #define D64_RC_SH 0x00000200 | |
78 | /* overflow continue */ | |
79 | #define D64_RC_OC 0x00000400 | |
80 | /* parity check disable */ | |
81 | #define D64_RC_PD 0x00000800 | |
82 | /* address extension bits */ | |
83 | #define D64_RC_AE 0x00030000 | |
84 | #define D64_RC_AE_SHIFT 16 | |
85 | ||
86 | /* flags for dma controller */ | |
87 | /* parity enable */
88 | #define DMA_CTRL_PEN (1 << 0) | |
89 | /* rx overflow continue */ | |
90 | #define DMA_CTRL_ROC (1 << 1) | |
91 | /* allow rx scatter to multiple descriptors */ | |
92 | #define DMA_CTRL_RXMULTI (1 << 2) | |
93 | /* Unframed Rx/Tx data */ | |
94 | #define DMA_CTRL_UNFRAMED (1 << 3) | |
95 | ||
96 | /* receive descriptor table pointer */ | |
97 | #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */ | |
98 | ||
99 | /* receive channel status */ | |
100 | #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */ | |
101 | #define D64_RS0_RS_MASK 0xf0000000 /* receive state */ | |
102 | #define D64_RS0_RS_SHIFT 28 | |
103 | #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */ | |
104 | #define D64_RS0_RS_ACTIVE 0x10000000 /* active */ | |
105 | #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */ | |
106 | #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */ | |
107 | #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */ | |
108 | ||
109 | #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */ | |
110 | #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ | |
111 | #define D64_RS1_RE_SHIFT 28 | |
112 | #define D64_RS1_RE_NOERR 0x00000000 /* no error */ | |
113 | #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */ | |
114 | #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */ | |
115 | #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */ | |
116 | #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */ | |
117 | #define D64_RS1_RE_COREE 0x50000000 /* core error */ | |
118 | ||
119 | /* fifoaddr */ | |
120 | #define D64_FA_OFF_MASK 0xffff /* offset */ | |
121 | #define D64_FA_SEL_MASK 0xf0000 /* select */ | |
122 | #define D64_FA_SEL_SHIFT 16 | |
123 | #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */ | |
124 | #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */ | |
125 | #define D64_FA_SEL_RDD 0x40000 /* receive dma data */ | |
126 | #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */ | |
127 | #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */ | |
128 | #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */ | |
129 | #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */ | |
130 | #define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */ | |
131 | #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */ | |
132 | #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */ | |
133 | ||
134 | /* descriptor control flags 1 */ | |
135 | #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */ | |
136 | #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */ | |
137 | #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */ | |
138 | #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */ | |
139 | #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */ | |
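/* A frame that fits in a single buffer carries both D64_CTRL1_SOF and D64_CTRL1_EOF (plus D64_CTRL1_IOC) in the same descriptor; see dma_txfast() below. */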
140 | ||
141 | /* descriptor control flags 2 */ | |
142 | /* buffer byte count. real data len must be <= 16 KB */
143 | #define D64_CTRL2_BC_MASK 0x00007fff | |
144 | /* address extension bits */ | |
145 | #define D64_CTRL2_AE 0x00030000 | |
146 | #define D64_CTRL2_AE_SHIFT 16 | |
147 | /* parity bit */ | |
148 | #define D64_CTRL2_PARITY 0x00040000 | |
149 | ||
150 | /* control flags in the range [27:20] are core-specific and not defined here */ | |
151 | #define D64_CTRL_CORE_MASK 0x0ff00000 | |
152 | ||
153 | #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */ | |
154 | #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */ | |
155 | #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */ | |
156 | #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */ | |
157 | ||
158 | /* | |
159 | * packet headroom necessary to accommodate the largest header | |
160 | * in the system (i.e. TXOFF). By doing so, we avoid the need to
161 | * allocate an extra buffer for the header when bridging to WL.
162 | * There is a compile-time check in wlc.c which ensures that this
163 | * value is at least as big as TXOFF. This value is used in | |
164 | * dma_rxfill(). | |
165 | */ | |
166 | ||
167 | #define BCMEXTRAHDROOM 172 | |
168 | ||
169 | /* debug/trace */ | |
170 | #ifdef BCMDBG | |
171 | #define DMA_ERROR(args) \ | |
172 | do { \ | |
173 | if (!(*di->msg_level & 1)) \ | |
174 | ; \ | |
175 | else \ | |
176 | printk args; \ | |
177 | } while (0) | |
178 | #define DMA_TRACE(args) \ | |
179 | do { \ | |
180 | if (!(*di->msg_level & 2)) \ | |
181 | ; \ | |
182 | else \ | |
183 | printk args; \ | |
184 | } while (0) | |
185 | #else | |
186 | #define DMA_ERROR(args) | |
187 | #define DMA_TRACE(args) | |
188 | #endif /* BCMDBG */ | |
189 | ||
190 | #define DMA_NONE(args) | |
191 | ||
192 | #define MAXNAMEL 8 /* 8 char names */ | |
193 | ||
194 | /* macros to convert between byte offsets and indexes */ | |
195 | #define B2I(bytes, type) ((bytes) / sizeof(type)) | |
196 | #define I2B(index, type) ((index) * sizeof(type)) | |
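/* Example: with sizeof(struct dma64desc) == 16, B2I(0x40, struct dma64desc) == 4 and I2B(4, struct dma64desc) == 0x40. */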
197 | ||
198 | #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */ | |
199 | #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */ | |
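/* e.g. for a 32-bit PCI address pa == 0xc0001000, ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 0x3 and the remaining 0x00001000 is what lands in the descriptor addrlow; see dma64_dd_upd() below. */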
200 | ||
201 | #define PCI64ADDR_HIGH 0x80000000 /* address[63] */ | |
202 | #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */ | |
203 | ||
204 | /* | |
205 | * DMA Descriptor | |
206 | * Descriptors are only read by the hardware, never written back. | |
207 | */ | |
208 | struct dma64desc { | |
209 | __le32 ctrl1; /* misc control bits & bufcount */ | |
210 | __le32 ctrl2; /* buffer count and address extension */ | |
211 | __le32 addrlow; /* memory address of the data buffer, bits 31:0 */
212 | __le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
213 | }; | |
214 | ||
215 | /* dma engine software state */ | |
216 | struct dma_info { | |
217 | struct dma_pub dma; /* exported structure */ | |
218 | uint *msg_level; /* message level pointer */ | |
219 | char name[MAXNAMEL]; /* callers name for diag msgs */ | |
220 | ||
221 | struct pci_dev *pbus; /* bus handle */ | |
222 | ||
223 | bool dma64; /* this dma engine is operating in 64-bit mode */ | |
224 | bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ | |
225 | ||
226 | /* 64-bit dma tx engine registers */ | |
227 | struct dma64regs __iomem *d64txregs; | |
228 | /* 64-bit dma rx engine registers */ | |
229 | struct dma64regs __iomem *d64rxregs; | |
230 | /* pointer to dma64 tx descriptor ring */ | |
231 | struct dma64desc *txd64; | |
232 | /* pointer to dma64 rx descriptor ring */ | |
233 | struct dma64desc *rxd64; | |
234 | ||
235 | u16 dmadesc_align; /* alignment requirement for dma descriptors */ | |
236 | ||
237 | u16 ntxd; /* # tx descriptors tunable */ | |
238 | u16 txin; /* index of next descriptor to reclaim */ | |
239 | u16 txout; /* index of next descriptor to post */ | |
240 | /* pointer to parallel array of pointers to packets */ | |
241 | struct sk_buff **txp; | |
242 | /* Aligned physical address of descriptor ring */ | |
243 | dma_addr_t txdpa; | |
244 | /* Original physical address of descriptor ring */ | |
245 | dma_addr_t txdpaorig; | |
246 | u16 txdalign; /* #bytes added to alloc'd mem to align txd */ | |
247 | u32 txdalloc; /* #bytes allocated for the ring */ | |
248 | u32 xmtptrbase; /* When using unaligned descriptors, the ptr register | |
249 | * is not just an index, it needs all 13 bits to be | |
250 | * an offset from the addr register. | |
251 | */ | |
252 | ||
253 | u16 nrxd; /* # rx descriptors tunable */ | |
254 | u16 rxin; /* index of next descriptor to reclaim */ | |
255 | u16 rxout; /* index of next descriptor to post */ | |
256 | /* pointer to parallel array of pointers to packets */ | |
257 | struct sk_buff **rxp; | |
258 | /* Aligned physical address of descriptor ring */ | |
259 | dma_addr_t rxdpa; | |
260 | /* Original physical address of descriptor ring */ | |
261 | dma_addr_t rxdpaorig; | |
262 | u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */ | |
263 | u32 rxdalloc; /* #bytes allocated for the ring */ | |
264 | u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */ | |
265 | ||
266 | /* tunables */ | |
267 | unsigned int rxbufsize; /* rx buffer size in bytes, not including | |
268 | * the extra headroom | |
269 | */ | |
270 | uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper
271 | * stack, e.g. some rx pkt buffers will be | |
272 | * bridged to tx side without byte copying. | |
273 | * The extra headroom needs to be large enough | |
274 | * to fit tx header needs. Some dongle drivers may
275 | * not need it. | |
276 | */ | |
277 | uint nrxpost; /* # rx buffers to keep posted */ | |
278 | unsigned int rxoffset; /* rxcontrol offset */ | |
279 | /* add to get dma address of descriptor ring, low 32 bits */ | |
280 | uint ddoffsetlow; | |
281 | /* high 32 bits */ | |
282 | uint ddoffsethigh; | |
283 | /* add to get dma address of data buffer, low 32 bits */ | |
284 | uint dataoffsetlow; | |
285 | /* high 32 bits */ | |
286 | uint dataoffsethigh; | |
287 | /* descriptor base need to be aligned or not */ | |
288 | bool aligndesc_4k; | |
289 | }; | |
290 | ||
291 | /* | |
292 | * default dma message level (if input msg_level | |
293 | * pointer is null in dma_attach()) | |
294 | */ | |
295 | static uint dma_msg_level; | |
296 | ||
297 | /* Check for odd number of 1's */ | |
298 | static u32 parity32(__le32 data) | |
299 | { | |
300 | /* no swap needed for counting 1's */ | |
301 | u32 par_data = *(u32 *)&data; | |
302 | ||
303 | par_data ^= par_data >> 16; | |
304 | par_data ^= par_data >> 8; | |
305 | par_data ^= par_data >> 4; | |
306 | par_data ^= par_data >> 2; | |
307 | par_data ^= par_data >> 1; | |
308 | ||
309 | return par_data & 1; | |
310 | } | |
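/* e.g. parity32(cpu_to_le32(0x3)) == 0 (two set bits) and parity32(cpu_to_le32(0x7)) == 1 (three set bits). */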
311 | ||
312 | static bool dma64_dd_parity(struct dma64desc *dd) | |
313 | { | |
314 | return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2); | |
315 | } | |
316 | ||
317 | /* descriptor bumping functions */ | |
318 | ||
319 | static uint xxd(uint x, uint n) | |
320 | { | |
321 | return x & (n - 1); /* faster than %, but n must be power of 2 */ | |
322 | } | |
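/* e.g. with n == 64, xxd(64, 64) == 0 and xxd(65, 64) == 1: the index wraps around the ring. */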
323 | ||
324 | static uint txd(struct dma_info *di, uint x) | |
325 | { | |
326 | return xxd(x, di->ntxd); | |
327 | } | |
328 | ||
329 | static uint rxd(struct dma_info *di, uint x) | |
330 | { | |
331 | return xxd(x, di->nrxd); | |
332 | } | |
333 | ||
334 | static uint nexttxd(struct dma_info *di, uint i) | |
335 | { | |
336 | return txd(di, i + 1); | |
337 | } | |
338 | ||
339 | static uint prevtxd(struct dma_info *di, uint i) | |
340 | { | |
341 | return txd(di, i - 1); | |
342 | } | |
343 | ||
344 | static uint nextrxd(struct dma_info *di, uint i) | |
345 | { | |
346 | return rxd(di, i + 1);
347 | } | |
348 | ||
349 | static uint ntxdactive(struct dma_info *di, uint h, uint t) | |
350 | { | |
351 | return txd(di, t-h); | |
352 | } | |
353 | ||
354 | static uint nrxdactive(struct dma_info *di, uint h, uint t) | |
355 | { | |
356 | return rxd(di, t-h); | |
357 | } | |
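/* e.g. with ntxd == 64, ntxdactive(di, 62, 2) == ((uint)(2 - 62)) & 63 == 4: four descriptors are outstanding across the ring wrap. */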
358 | ||
359 | static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) | |
360 | { | |
361 | uint dmactrlflags;
362 |
363 | if (di == NULL) {
364 | DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
365 | return 0;
366 | }
367 |
368 | dmactrlflags = di->dma.dmactrlflags;
369 | dmactrlflags &= ~mask; |
370 | dmactrlflags |= flags; | |
371 | ||
372 | /* If trying to enable parity, check if parity is actually supported */ | |
373 | if (dmactrlflags & DMA_CTRL_PEN) { | |
374 | u32 control; | |
375 | ||
376 | control = R_REG(&di->d64txregs->control); | |
377 | W_REG(&di->d64txregs->control, | |
378 | control | D64_XC_PD); | |
379 | if (R_REG(&di->d64txregs->control) & D64_XC_PD) | |
380 | /* We *can* disable it so it is supported, | |
381 | * restore control register | |
382 | */ | |
383 | W_REG(&di->d64txregs->control, | |
384 | control); | |
385 | else | |
386 | /* Not supported, don't allow it to be enabled */ | |
387 | dmactrlflags &= ~DMA_CTRL_PEN; | |
388 | } | |
389 | ||
390 | di->dma.dmactrlflags = dmactrlflags; | |
391 | ||
392 | return dmactrlflags; | |
393 | } | |
394 | ||
395 | static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) | |
396 | { | |
397 | u32 w; | |
398 | OR_REG(&dma64regs->control, D64_XC_AE); | |
399 | w = R_REG(&dma64regs->control); | |
400 | AND_REG(&dma64regs->control, ~D64_XC_AE); | |
401 | return (w & D64_XC_AE) == D64_XC_AE; | |
402 | } | |
403 | ||
404 | /* | |
405 | * return true if this dma engine supports DmaExtendedAddrChanges, | |
406 | * otherwise false | |
407 | */ | |
408 | static bool _dma_isaddrext(struct dma_info *di) | |
409 | { | |
410 | /* DMA64 supports full 32- or 64-bit operation. AE is always valid */ | |
411 | ||
412 | /* not all tx or rx channels are available */
413 | if (di->d64txregs != NULL) { | |
414 | if (!_dma64_addrext(di->d64txregs)) | |
415 | DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have " | |
416 | "AE set\n", di->name)); | |
417 | return true; | |
418 | } else if (di->d64rxregs != NULL) { | |
419 | if (!_dma64_addrext(di->d64rxregs)) | |
420 | DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have " | |
421 | "AE set\n", di->name)); | |
422 | return true; | |
423 | } | |
424 | ||
425 | return false; | |
426 | } | |
427 | ||
428 | static bool _dma_descriptor_align(struct dma_info *di) | |
429 | { | |
430 | u32 addrl; | |
431 | ||
432 | /* Check to see if the descriptors need to be aligned on 4K/8K or not */ | |
433 | if (di->d64txregs != NULL) { | |
434 | W_REG(&di->d64txregs->addrlow, 0xff0); | |
435 | addrl = R_REG(&di->d64txregs->addrlow); | |
436 | if (addrl != 0) | |
437 | return false; | |
438 | } else if (di->d64rxregs != NULL) { | |
439 | W_REG(&di->d64rxregs->addrlow, 0xff0); | |
440 | addrl = R_REG(&di->d64rxregs->addrlow); | |
441 | if (addrl != 0) | |
442 | return false; | |
443 | } | |
444 | return true; | |
445 | } | |
446 | ||
447 | /* | |
448 | * Descriptor table must start at the DMA hardware dictated alignment, so | |
449 | * allocated memory must be large enough to support this requirement. | |
450 | */ | |
451 | static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, | |
452 | u16 align_bits, uint *alloced, | |
453 | dma_addr_t *pap) | |
454 | { | |
455 | if (align_bits) { | |
456 | u16 align = (1 << align_bits); | |
457 | if (!IS_ALIGNED(PAGE_SIZE, align)) | |
458 | size += align; | |
459 | *alloced = size; | |
460 | } | |
461 | return pci_alloc_consistent(pdev, size, pap); | |
462 | } | |
463 | ||
464 | static | |
465 | u8 dma_align_sizetobits(uint size) | |
466 | { | |
467 | u8 bitpos = 0; | |
468 | while (size >>= 1) | |
469 | bitpos++; | |
470 | return bitpos; | |
471 | } | |
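/* e.g. dma_align_sizetobits(8192) == 13, matching D64RINGALIGN_BITS. */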
472 | ||
473 | /* This function ensures that the DMA descriptor ring does not get allocated
474 | * across a page boundary. If the first allocation does cross a page boundary,
475 | * it is freed and the allocation is redone at a location aligned to the
476 | * descriptor ring size, which guarantees that the ring will not cross a
477 | * page boundary.
478 | */
479 | static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, | |
480 | u16 *alignbits, uint *alloced, | |
481 | dma_addr_t *descpa) | |
482 | { | |
483 | void *va; | |
484 | u32 desc_strtaddr; | |
485 | u32 alignbytes = 1 << *alignbits; | |
486 | ||
487 | va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); | |
488 | ||
489 | if (NULL == va) | |
490 | return NULL; | |
491 | ||
492 | desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes); | |
493 | if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr | |
494 | & boundary)) { | |
495 | *alignbits = dma_align_sizetobits(size); | |
496 | pci_free_consistent(di->pbus, size, va, *descpa); | |
497 | va = dma_alloc_consistent(di->pbus, size, *alignbits, | |
498 | alloced, descpa); | |
499 | } | |
500 | return va; | |
501 | } | |
502 | ||
503 | static bool dma64_alloc(struct dma_info *di, uint direction) | |
504 | { | |
505 | u16 size; | |
506 | uint ddlen; | |
507 | void *va; | |
508 | uint alloced = 0; | |
509 | u16 align; | |
510 | u16 align_bits; | |
511 | ||
512 | ddlen = sizeof(struct dma64desc); | |
513 | ||
514 | size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); | |
515 | align_bits = di->dmadesc_align; | |
516 | align = (1 << align_bits); | |
517 | ||
518 | if (direction == DMA_TX) { | |
519 | va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, | |
520 | &alloced, &di->txdpaorig); | |
521 | if (va == NULL) { | |
522 | DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)" | |
523 | " failed\n", di->name)); | |
524 | return false; | |
525 | } | |
526 | align = (1 << align_bits); | |
527 | di->txd64 = (struct dma64desc *) | |
528 | roundup((unsigned long)va, align); | |
529 | di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va); | |
530 | di->txdpa = di->txdpaorig + di->txdalign; | |
531 | di->txdalloc = alloced; | |
532 | } else { | |
533 | va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, | |
534 | &alloced, &di->rxdpaorig); | |
535 | if (va == NULL) { | |
536 | DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)" | |
537 | " failed\n", di->name)); | |
538 | return false; | |
539 | } | |
540 | align = (1 << align_bits); | |
541 | di->rxd64 = (struct dma64desc *) | |
542 | roundup((unsigned long)va, align); | |
543 | di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va); | |
544 | di->rxdpa = di->rxdpaorig + di->rxdalign; | |
545 | di->rxdalloc = alloced; | |
546 | } | |
547 | ||
548 | return true; | |
549 | } | |
550 | ||
551 | static bool _dma_alloc(struct dma_info *di, uint direction) | |
552 | { | |
553 | return dma64_alloc(di, direction); | |
554 | } | |
555 | ||
556 | struct dma_pub *dma_attach(char *name, struct si_pub *sih, | |
557 | void __iomem *dmaregstx, void __iomem *dmaregsrx, | |
558 | uint ntxd, uint nrxd, | |
559 | uint rxbufsize, int rxextheadroom, | |
560 | uint nrxpost, uint rxoffset, uint *msg_level) | |
561 | { | |
562 | struct dma_info *di; | |
563 | uint size; | |
564 | ||
565 | /* allocate private info structure */ | |
566 | di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC); | |
567 | if (di == NULL) | |
568 | return NULL; | |
569 | ||
570 | di->msg_level = msg_level ? msg_level : &dma_msg_level; | |
571 | ||
572 | ||
573 | di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); | |
574 | ||
575 | /* init dma reg pointer */ | |
576 | di->d64txregs = (struct dma64regs __iomem *) dmaregstx; | |
577 | di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; | |
578 | ||
579 | /* | |
580 | * Default flags (which can be changed by the driver calling | |
581 | * dma_ctrlflags before enable): For backwards compatibility | |
582 | * both Rx Overflow Continue and Parity are DISABLED. | |
583 | */ | |
584 | _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); | |
585 | ||
586 | DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d " | |
587 | "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " | |
588 | "dmaregstx %p dmaregsrx %p\n", name, "DMA64", | |
589 | di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, | |
590 | rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx)); | |
591 | ||
592 | /* make a private copy of our callers name */ | |
593 | strncpy(di->name, name, MAXNAMEL); | |
594 | di->name[MAXNAMEL - 1] = '\0'; | |
595 | ||
596 | di->pbus = ((struct si_info *)sih)->pbus; | |
597 | ||
598 | /* save tunables */ | |
599 | di->ntxd = (u16) ntxd; | |
600 | di->nrxd = (u16) nrxd; | |
601 | ||
602 | /* the actual dma size doesn't include the extra headroom */ | |
603 | di->rxextrahdrroom = | |
604 | (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom; | |
605 | if (rxbufsize > BCMEXTRAHDROOM) | |
606 | di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom); | |
607 | else | |
608 | di->rxbufsize = (u16) rxbufsize; | |
609 | ||
610 | di->nrxpost = (u16) nrxpost; | |
611 | di->rxoffset = (u8) rxoffset; | |
612 | ||
613 | /* | |
614 | * figure out the DMA physical address offset for dd and data | |
615 | * PCI/PCIE: they map silicon backplane address to zero
616 | * based memory, need offset | |
617 | * Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram | |
618 | * swapped region for data buffer, not descriptor | |
619 | */ | |
620 | di->ddoffsetlow = 0; | |
621 | di->dataoffsetlow = 0; | |
622 | /* add offset for pcie with DMA64 bus */ | |
623 | di->ddoffsetlow = 0; | |
624 | di->ddoffsethigh = SI_PCIE_DMA_H32; | |
625 | di->dataoffsetlow = di->ddoffsetlow; | |
626 | di->dataoffsethigh = di->ddoffsethigh; | |
627 | /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */ | |
628 | if ((ai_coreid(sih) == SDIOD_CORE_ID) | |
629 | && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) | |
630 | di->addrext = 0; | |
631 | else if ((ai_coreid(sih) == I2S_CORE_ID) && | |
632 | ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) | |
633 | di->addrext = 0; | |
634 | else | |
635 | di->addrext = _dma_isaddrext(di); | |
636 | ||
637 | /* does the descriptor need to be aligned, and if so, on a 4K/8K boundary? */
638 | di->aligndesc_4k = _dma_descriptor_align(di); | |
639 | if (di->aligndesc_4k) { | |
640 | di->dmadesc_align = D64RINGALIGN_BITS; | |
641 | if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) | |
642 | /* for a smaller dd table, HW relaxes the alignment requirement */
643 | di->dmadesc_align = D64RINGALIGN_BITS - 1; | |
644 | } else { | |
645 | di->dmadesc_align = 4; /* 16 byte alignment */ | |
646 | } | |
647 | ||
648 | DMA_NONE(("DMA descriptor align_needed %d, align %d\n", | |
649 | di->aligndesc_4k, di->dmadesc_align)); | |
650 | ||
651 | /* allocate tx packet pointer vector */ | |
652 | if (ntxd) { | |
653 | size = ntxd * sizeof(void *); | |
654 | di->txp = kzalloc(size, GFP_ATOMIC); | |
655 | if (di->txp == NULL) | |
656 | goto fail; | |
657 | } | |
658 | ||
659 | /* allocate rx packet pointer vector */ | |
660 | if (nrxd) { | |
661 | size = nrxd * sizeof(void *); | |
662 | di->rxp = kzalloc(size, GFP_ATOMIC); | |
663 | if (di->rxp == NULL) | |
664 | goto fail; | |
665 | } | |
666 | ||
667 | /* | |
668 | * allocate transmit descriptor ring, only need ntxd descriptors | |
669 | * but it must be aligned | |
670 | */ | |
671 | if (ntxd) { | |
672 | if (!_dma_alloc(di, DMA_TX)) | |
673 | goto fail; | |
674 | } | |
675 | ||
676 | /* | |
677 | * allocate receive descriptor ring, only need nrxd descriptors | |
678 | * but it must be aligned | |
679 | */ | |
680 | if (nrxd) { | |
681 | if (!_dma_alloc(di, DMA_RX)) | |
682 | goto fail; | |
683 | } | |
684 | ||
685 | if ((di->ddoffsetlow != 0) && !di->addrext) { | |
686 | if (di->txdpa > SI_PCI_DMA_SZ) { | |
687 | DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not " | |
688 | "supported\n", di->name, (u32)di->txdpa)); | |
689 | goto fail; | |
690 | } | |
691 | if (di->rxdpa > SI_PCI_DMA_SZ) { | |
692 | DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not " | |
693 | "supported\n", di->name, (u32)di->rxdpa)); | |
694 | goto fail; | |
695 | } | |
696 | } | |
697 | ||
698 | DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x " | |
699 | "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, | |
700 | di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, | |
701 | di->addrext)); | |
702 | ||
703 | return (struct dma_pub *) di; | |
704 | ||
705 | fail: | |
706 | dma_detach((struct dma_pub *)di); | |
707 | return NULL; | |
708 | } | |
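/*
 * Rough usage sketch (a reading aid, not a prescription from this file): a
 * driver typically calls dma_attach() once per fifo, then dma_txinit() and
 * dma_rxinit(), keeps the rx ring stocked with dma_rxfill(), posts frames
 * with dma_txfast(), and reclaims completed buffers via dma_getnexttxp()
 * and dma_rx().
 */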
709 | ||
710 | static inline void | |
711 | dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring, | |
712 | dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount) | |
713 | { | |
714 | u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK; | |
715 | ||
716 | /* PCI bus with a big (>1 GB) physical address: use address extension */
717 | if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) { | |
718 | ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow); | |
719 | ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh); | |
720 | ddring[outidx].ctrl1 = cpu_to_le32(*flags); | |
721 | ddring[outidx].ctrl2 = cpu_to_le32(ctrl2); | |
722 | } else { | |
723 | /* address extension for 32-bit PCI */ | |
724 | u32 ae; | |
725 | ||
726 | ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; | |
727 | pa &= ~PCI32ADDR_HIGH; | |
728 | ||
729 | ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE; | |
730 | ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow); | |
731 | ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh); | |
732 | ddring[outidx].ctrl1 = cpu_to_le32(*flags); | |
733 | ddring[outidx].ctrl2 = cpu_to_le32(ctrl2); | |
734 | } | |
735 | if (di->dma.dmactrlflags & DMA_CTRL_PEN) { | |
736 | if (dma64_dd_parity(&ddring[outidx])) | |
737 | ddring[outidx].ctrl2 = | |
738 | cpu_to_le32(ctrl2 | D64_CTRL2_PARITY); | |
739 | } | |
740 | } | |
741 | ||
742 | /* !! may be called with core in reset */ | |
743 | void dma_detach(struct dma_pub *pub) | |
744 | { | |
745 | struct dma_info *di = (struct dma_info *)pub; | |
746 | ||
747 | DMA_TRACE(("%s: dma_detach\n", di->name)); | |
748 | ||
749 | /* free dma descriptor rings */ | |
750 | if (di->txd64) | |
751 | pci_free_consistent(di->pbus, di->txdalloc, | |
752 | ((s8 *)di->txd64 - di->txdalign), | |
753 | (di->txdpaorig)); | |
754 | if (di->rxd64) | |
755 | pci_free_consistent(di->pbus, di->rxdalloc, | |
756 | ((s8 *)di->rxd64 - di->rxdalign), | |
757 | (di->rxdpaorig)); | |
758 | ||
759 | /* free packet pointer vectors */ | |
760 | kfree(di->txp); | |
761 | kfree(di->rxp); | |
762 | ||
763 | /* free our private info structure */ | |
764 | kfree(di); | |
765 | ||
766 | } | |
767 | ||
768 | /* initialize descriptor table base address */ | |
769 | static void | |
770 | _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) | |
771 | { | |
772 | if (!di->aligndesc_4k) { | |
773 | if (direction == DMA_TX) | |
774 | di->xmtptrbase = pa; | |
775 | else | |
776 | di->rcvptrbase = pa; | |
777 | } | |
778 | ||
779 | if ((di->ddoffsetlow == 0) | |
780 | || !(pa & PCI32ADDR_HIGH)) { | |
781 | if (direction == DMA_TX) { | |
782 | W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); | |
783 | W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); | |
784 | } else { | |
785 | W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); | |
786 | W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); | |
787 | } | |
788 | } else { | |
789 | /* DMA64 32bits address extension */ | |
790 | u32 ae; | |
791 | ||
792 | /* shift the high bit(s) from pa to ae */ | |
793 | ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; | |
794 | pa &= ~PCI32ADDR_HIGH; | |
795 | ||
796 | if (direction == DMA_TX) { | |
797 | W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); | |
798 | W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); | |
799 | SET_REG(&di->d64txregs->control, | |
800 | D64_XC_AE, (ae << D64_XC_AE_SHIFT)); | |
801 | } else { | |
802 | W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); | |
803 | W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); | |
804 | SET_REG(&di->d64rxregs->control, | |
805 | D64_RC_AE, (ae << D64_RC_AE_SHIFT)); | |
806 | } | |
807 | } | |
808 | } | |
809 | ||
810 | static void _dma_rxenable(struct dma_info *di) | |
811 | { | |
812 | uint dmactrlflags = di->dma.dmactrlflags; | |
813 | u32 control; | |
814 | ||
815 | DMA_TRACE(("%s: dma_rxenable\n", di->name)); | |
816 | ||
817 | control = | |
818 | (R_REG(&di->d64rxregs->control) & D64_RC_AE) | | |
819 | D64_RC_RE; | |
820 | ||
821 | if ((dmactrlflags & DMA_CTRL_PEN) == 0) | |
822 | control |= D64_RC_PD; | |
823 | ||
824 | if (dmactrlflags & DMA_CTRL_ROC) | |
825 | control |= D64_RC_OC; | |
826 | ||
827 | W_REG(&di->d64rxregs->control, | |
828 | ((di->rxoffset << D64_RC_RO_SHIFT) | control)); | |
829 | } | |
830 | ||
831 | void dma_rxinit(struct dma_pub *pub) | |
832 | { | |
833 | struct dma_info *di = (struct dma_info *)pub; | |
834 | ||
835 | DMA_TRACE(("%s: dma_rxinit\n", di->name)); | |
836 | ||
837 | if (di->nrxd == 0) | |
838 | return; | |
839 | ||
840 | di->rxin = di->rxout = 0; | |
841 | ||
842 | /* clear rx descriptor ring */ | |
843 | memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc)); | |
844 | ||
845 | /* DMA engine without alignment requirement requires the table to be
846 | * initialized before enabling the engine
847 | */ | |
848 | if (!di->aligndesc_4k) | |
849 | _dma_ddtable_init(di, DMA_RX, di->rxdpa); | |
850 | ||
851 | _dma_rxenable(di); | |
852 | ||
853 | if (di->aligndesc_4k) | |
854 | _dma_ddtable_init(di, DMA_RX, di->rxdpa); | |
855 | } | |
856 | ||
857 | static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) | |
858 | { | |
859 | uint i, curr; | |
860 | struct sk_buff *rxp; | |
861 | dma_addr_t pa; | |
862 | ||
863 | i = di->rxin; | |
864 | ||
865 | /* return if no packets posted */ | |
866 | if (i == di->rxout) | |
867 | return NULL; | |
868 | ||
869 | curr = | |
870 | B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - | |
871 | di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); | |
872 | ||
873 | /* ignore curr if forceall */ | |
874 | if (!forceall && (i == curr)) | |
875 | return NULL; | |
876 | ||
877 | /* get the packet pointer that corresponds to the rx descriptor */ | |
878 | rxp = di->rxp[i]; | |
879 | di->rxp[i] = NULL; | |
880 | ||
881 | pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; | |
882 | ||
883 | /* clear this packet from the descriptor ring */ | |
884 | pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE); | |
885 | ||
886 | di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); | |
887 | di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); | |
888 | ||
889 | di->rxin = nextrxd(di, i); | |
890 | ||
891 | return rxp; | |
892 | } | |
893 | ||
894 | static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) | |
895 | { | |
896 | if (di->nrxd == 0) | |
897 | return NULL; | |
898 | ||
899 | return dma64_getnextrxp(di, forceall); | |
900 | } | |
901 | ||
902 | /* | |
903 | * !! rx entry routine | |
904 | * returns the number of packets in the next frame, or 0 if there are no more
905 | * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
906 | * supported with a packet chain;
907 | * otherwise, it is treated as a giant packet and will be tossed.
908 | * The DMA scattering starts with a normal DMA header, followed by the first
909 | * buffer data. After it reaches the max buffer size, the data continues
910 | * in the next DMA descriptor buffer WITHOUT a DMA header
911 | */ | |
912 | int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
913 | {
914 | struct dma_info *di = (struct dma_info *)pub;
915 | struct sk_buff_head dma_frames;
916 | struct sk_buff *p, *next;
917 | uint len; |
918 | uint pkt_len; | |
919 | int resid = 0; | |
920 | int pktcnt = 1;
921 |
922 | skb_queue_head_init(&dma_frames);
923 | next_frame:
924 | p = _dma_getnextrxp(di, false);
925 | if (p == NULL)
926 | return 0;
927 |
928 | len = le16_to_cpu(*(__le16 *) (p->data));
929 | DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
930 | dma_spin_for_len(len, p);
931 | |
932 | /* set actual length */ | |
933 | pkt_len = min((di->rxoffset + len), di->rxbufsize); | |
934 | __skb_trim(p, pkt_len);
935 | skb_queue_tail(&dma_frames, p);
936 | resid = len - (di->rxbufsize - di->rxoffset); |
937 | ||
938 | /* check for single or multi-buffer rx */ | |
939 | if (resid > 0) { | |
940 | while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
941 | pkt_len = min_t(uint, resid, di->rxbufsize);
942 | __skb_trim(p, pkt_len);
943 | skb_queue_tail(&dma_frames, p);
944 | resid -= di->rxbufsize;
945 | pktcnt++;
946 | } |
947 | ||
948 | #ifdef BCMDBG | |
949 | if (resid > 0) { | |
950 | uint cur; | |
951 | cur = | |
952 | B2I(((R_REG(&di->d64rxregs->status0) & | |
953 | D64_RS0_CD_MASK) - | |
954 | di->rcvptrbase) & D64_RS0_CD_MASK, | |
955 | struct dma64desc); | |
956 | DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n", | |
957 | di->rxin, di->rxout, cur)); | |
958 | } | |
959 | #endif /* BCMDBG */ | |
960 | ||
961 | if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { | |
962 | DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", | |
963 | di->name, len)); | |
964 | skb_queue_walk_safe(&dma_frames, p, next) { |
965 | skb_unlink(p, &dma_frames); | |
966 | brcmu_pkt_buf_free_skb(p); | |
967 | } | |
968 | di->dma.rxgiants++;
969 | pktcnt = 1;
970 | goto next_frame; |
971 | } | |
972 | } | |
973 | ||
974 | skb_queue_splice_tail(&dma_frames, skb_list);
975 | return pktcnt;
976 | } |
977 | ||
978 | static bool dma64_rxidle(struct dma_info *di) | |
979 | { | |
980 | DMA_TRACE(("%s: dma_rxidle\n", di->name)); | |
981 | ||
982 | if (di->nrxd == 0) | |
983 | return true; | |
984 | ||
985 | return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == | |
986 | (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); | |
987 | } | |
988 | ||
989 | /* | |
990 | * post receive buffers | |
991 | * returns true if refill failed completely and the ring is empty; this will
992 | * stall the rx dma and the user might want to call rxfill again asap. This is
993 | * unlikely on a memory-rich NIC, but common on a memory-constrained dongle
994 | */ | |
995 | bool dma_rxfill(struct dma_pub *pub) | |
996 | { | |
997 | struct dma_info *di = (struct dma_info *)pub; | |
998 | struct sk_buff *p; | |
999 | u16 rxin, rxout; | |
1000 | u32 flags = 0; | |
1001 | uint n; | |
1002 | uint i; | |
1003 | dma_addr_t pa; | |
1004 | uint extra_offset = 0; | |
1005 | bool ring_empty; | |
1006 | ||
1007 | ring_empty = false; | |
1008 | ||
1009 | /* | |
1010 | * Determine how many receive buffers we're lacking | |
1011 | * from the full complement, allocate, initialize, | |
1012 | * and post them, then update the chip rx lastdscr. | |
1013 | */ | |
1014 | ||
1015 | rxin = di->rxin; | |
1016 | rxout = di->rxout; | |
1017 | ||
1018 | n = di->nrxpost - nrxdactive(di, rxin, rxout); | |
1019 | ||
1020 | DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); | |
1021 | ||
1022 | if (di->rxbufsize > BCMEXTRAHDROOM) | |
1023 | extra_offset = di->rxextrahdrroom; | |
1024 | ||
1025 | for (i = 0; i < n; i++) { | |
1026 | /* | |
1027 | * the di->rxbufsize doesn't include the extra headroom, | |
1028 | * we need to add it to the size to be allocated | |
1029 | */ | |
1030 | p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); | |
1031 | ||
1032 | if (p == NULL) { | |
1033 | DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", | |
1034 | di->name)); | |
1035 | if (i == 0 && dma64_rxidle(di)) { | |
1036 | DMA_ERROR(("%s: rxfill64: ring is empty !\n", | |
1037 | di->name)); | |
1038 | ring_empty = true; | |
1039 | } | |
1040 | di->dma.rxnobuf++; | |
1041 | break; | |
1042 | } | |
1043 | /* reserve an extra headroom, if applicable */ | |
1044 | if (extra_offset) | |
1045 | skb_pull(p, extra_offset); | |
1046 | ||
1047 | /* Do a cached write instead of uncached write since DMA_MAP | |
1048 | * will flush the cache. | |
1049 | */ | |
1050 | *(u32 *) (p->data) = 0; | |
1051 | ||
1052 | pa = pci_map_single(di->pbus, p->data, | |
1053 | di->rxbufsize, PCI_DMA_FROMDEVICE); | |
1054 | ||
1055 | /* save the free packet pointer */ | |
1056 | di->rxp[rxout] = p; | |
1057 | ||
1058 | /* reset flags for each descriptor */ | |
1059 | flags = 0; | |
1060 | if (rxout == (di->nrxd - 1)) | |
1061 | flags = D64_CTRL1_EOT; | |
1062 | ||
1063 | dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, | |
1064 | di->rxbufsize); | |
1065 | rxout = nextrxd(di, rxout); | |
1066 | } | |
1067 | ||
1068 | di->rxout = rxout; | |
1069 | ||
1070 | /* update the chip lastdscr pointer */ | |
1071 | W_REG(&di->d64rxregs->ptr, | |
1072 | di->rcvptrbase + I2B(rxout, struct dma64desc)); | |
1073 | ||
1074 | return ring_empty; | |
1075 | } | |
1076 | ||
1077 | void dma_rxreclaim(struct dma_pub *pub) | |
1078 | { | |
1079 | struct dma_info *di = (struct dma_info *)pub; | |
1080 | struct sk_buff *p; | |
1081 | ||
1082 | DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); | |
1083 | ||
1084 | while ((p = _dma_getnextrxp(di, true))) | |
1085 | brcmu_pkt_buf_free_skb(p); | |
1086 | } | |
1087 | ||
1088 | void dma_counterreset(struct dma_pub *pub) | |
1089 | { | |
1090 | /* reset all software counters */ | |
1091 | pub->rxgiants = 0; | |
1092 | pub->rxnobuf = 0; | |
1093 | pub->txnobuf = 0; | |
1094 | } | |
1095 | ||
1096 | /* get the address of the var in order to change later */ | |
1097 | unsigned long dma_getvar(struct dma_pub *pub, const char *name) | |
1098 | { | |
1099 | struct dma_info *di = (struct dma_info *)pub; | |
1100 | ||
1101 | if (!strcmp(name, "&txavail")) | |
1102 | return (unsigned long)&(di->dma.txavail); | |
1103 | return 0; | |
1104 | } | |
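/* e.g. dma_getvar(pub, "&txavail") returns (unsigned long)&di->dma.txavail; any other name returns 0. */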
1105 | ||
1106 | /* 64-bit DMA functions */ | |
1107 | ||
1108 | void dma_txinit(struct dma_pub *pub) | |
1109 | { | |
1110 | struct dma_info *di = (struct dma_info *)pub; | |
1111 | u32 control = D64_XC_XE; | |
1112 | ||
1113 | DMA_TRACE(("%s: dma_txinit\n", di->name)); | |
1114 | ||
1115 | if (di->ntxd == 0) | |
1116 | return; | |
1117 | ||
1118 | di->txin = di->txout = 0; | |
1119 | di->dma.txavail = di->ntxd - 1; | |
1120 | ||
1121 | /* clear tx descriptor ring */ | |
1122 | memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc))); | |
1123 | ||
1124 | /* DMA engine without alignment requirement requires the table to be
1125 | * initialized before enabling the engine
1126 | */ | |
1127 | if (!di->aligndesc_4k) | |
1128 | _dma_ddtable_init(di, DMA_TX, di->txdpa); | |
1129 | ||
1130 | if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) | |
1131 | control |= D64_XC_PD; | |
1132 | OR_REG(&di->d64txregs->control, control); | |
1133 | ||
1134 | /* DMA engine with alignment requirement requires table to be inited | |
1135 | * before enabling the engine | |
1136 | */ | |
1137 | if (di->aligndesc_4k) | |
1138 | _dma_ddtable_init(di, DMA_TX, di->txdpa); | |
1139 | } | |
1140 | ||
1141 | void dma_txsuspend(struct dma_pub *pub) | |
1142 | { | |
1143 | struct dma_info *di = (struct dma_info *)pub; | |
1144 | ||
1145 | DMA_TRACE(("%s: dma_txsuspend\n", di->name)); | |
1146 | ||
1147 | if (di->ntxd == 0) | |
1148 | return; | |
1149 | ||
1150 | OR_REG(&di->d64txregs->control, D64_XC_SE); | |
1151 | } | |
1152 | ||
1153 | void dma_txresume(struct dma_pub *pub) | |
1154 | { | |
1155 | struct dma_info *di = (struct dma_info *)pub; | |
1156 | ||
1157 | DMA_TRACE(("%s: dma_txresume\n", di->name)); | |
1158 | ||
1159 | if (di->ntxd == 0) | |
1160 | return; | |
1161 | ||
1162 | AND_REG(&di->d64txregs->control, ~D64_XC_SE); | |
1163 | } | |
1164 | ||
1165 | bool dma_txsuspended(struct dma_pub *pub) | |
1166 | { | |
1167 | struct dma_info *di = (struct dma_info *)pub; | |
1168 | ||
1169 | return (di->ntxd == 0) || | |
1170 | ((R_REG(&di->d64txregs->control) & D64_XC_SE) == | |
1171 | D64_XC_SE); | |
1172 | } | |
1173 | ||
1174 | void dma_txreclaim(struct dma_pub *pub, enum txd_range range) | |
1175 | { | |
1176 | struct dma_info *di = (struct dma_info *)pub; | |
1177 | struct sk_buff *p; | |
1178 | ||
1179 | DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, | |
1180 | (range == DMA_RANGE_ALL) ? "all" : | |
1181 | ((range == | |
1182 | DMA_RANGE_TRANSMITTED) ? "transmitted" : | |
1183 | "transferred"))); | |
1184 | ||
1185 | if (di->txin == di->txout) | |
1186 | return; | |
1187 | ||
1188 | while ((p = dma_getnexttxp(pub, range))) { | |
1189 | /* For unframed data, we don't have any packets to free */ | |
1190 | if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED)) | |
1191 | brcmu_pkt_buf_free_skb(p); | |
1192 | } | |
1193 | } | |
1194 | ||
1195 | bool dma_txreset(struct dma_pub *pub) | |
1196 | { | |
1197 | struct dma_info *di = (struct dma_info *)pub; | |
1198 | u32 status; | |
1199 | ||
1200 | if (di->ntxd == 0) | |
1201 | return true; | |
1202 | ||
1203 | /* suspend tx DMA first */ | |
1204 | W_REG(&di->d64txregs->control, D64_XC_SE); | |
1205 | SPINWAIT(((status = | |
1206 | (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) | |
1207 | != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) | |
1208 | && (status != D64_XS0_XS_STOPPED), 10000); | |
1209 | ||
1210 | W_REG(&di->d64txregs->control, 0); | |
1211 | SPINWAIT(((status = | |
1212 | (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) | |
1213 | != D64_XS0_XS_DISABLED), 10000); | |
1214 | ||
1215 | /* wait for the last transaction to complete */ | |
1216 | udelay(300); | |
1217 | ||
1218 | return status == D64_XS0_XS_DISABLED; | |
1219 | } | |
1220 | ||
1221 | bool dma_rxreset(struct dma_pub *pub) | |
1222 | { | |
1223 | struct dma_info *di = (struct dma_info *)pub; | |
1224 | u32 status; | |
1225 | ||
1226 | if (di->nrxd == 0) | |
1227 | return true; | |
1228 | ||
1229 | W_REG(&di->d64rxregs->control, 0); | |
1230 | SPINWAIT(((status = | |
1231 | (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) | |
1232 | != D64_RS0_RS_DISABLED), 10000); | |
1233 | ||
1234 | return status == D64_RS0_RS_DISABLED; | |
1235 | } | |
1236 | ||
1237 | /* | |
1238 | * !! tx entry routine | |
1239 | * WARNING: the caller must check the return value for errors.
1240 | * An error (tossed frames) could be fatal and cause many subsequent
1241 | * hard-to-debug problems
1242 | */ | |
1243 | int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit) | |
1244 | { | |
1245 | struct dma_info *di = (struct dma_info *)pub; | |
1246 | struct sk_buff *p, *next; | |
1247 | unsigned char *data; | |
1248 | uint len; | |
1249 | u16 txout; | |
1250 | u32 flags = 0; | |
1251 | dma_addr_t pa; | |
1252 | ||
1253 | DMA_TRACE(("%s: dma_txfast\n", di->name)); | |
1254 | ||
1255 | txout = di->txout; | |
1256 | ||
1257 | /* | |
1258 | * Walk the chain of packet buffers | |
1259 | * allocating and initializing transmit descriptor entries. | |
1260 | */ | |
1261 | for (p = p0; p; p = next) { | |
1262 | data = p->data; | |
1263 | len = p->len; | |
1264 | next = p->next; | |
1265 | ||
1266 | /* return nonzero if out of tx descriptors */ | |
1267 | if (nexttxd(di, txout) == di->txin) | |
1268 | goto outoftxd; | |
1269 | ||
1270 | if (len == 0) | |
1271 | continue; | |
1272 | ||
1273 | /* get physical address of buffer start */ | |
1274 | pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE); | |
1275 | ||
1276 | flags = 0; | |
1277 | if (p == p0) | |
1278 | flags |= D64_CTRL1_SOF; | |
1279 | ||
1280 | /* With a DMA segment list, the descriptor table is filled
1281 | * using the segment list instead of looping over
1282 | * buffers in multi-chain DMA. Therefore, EOF for SGLIST
1283 | * is when the end of the segment list is reached.
1284 | */
1285 | if (next == NULL) | |
1286 | flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF); | |
1287 | if (txout == (di->ntxd - 1)) | |
1288 | flags |= D64_CTRL1_EOT; | |
1289 | ||
1290 | dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); | |
1291 | ||
1292 | txout = nexttxd(di, txout); | |
1293 | } | |
1294 | ||
1295 | /* if last txd eof not set, fix it */ | |
1296 | if (!(flags & D64_CTRL1_EOF)) | |
1297 | di->txd64[prevtxd(di, txout)].ctrl1 = | |
1298 | cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF); | |
1299 | ||
1300 | /* save the packet */ | |
1301 | di->txp[prevtxd(di, txout)] = p0; | |
1302 | ||
1303 | /* bump the tx descriptor index */ | |
1304 | di->txout = txout; | |
1305 | ||
1306 | /* kick the chip */ | |
1307 | if (commit) | |
1308 | W_REG(&di->d64txregs->ptr, | |
1309 | di->xmtptrbase + I2B(txout, struct dma64desc)); | |
1310 | ||
1311 | /* tx flow control */ | |
1312 | di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; | |
1313 | ||
1314 | return 0; | |
1315 | ||
1316 | outoftxd: | |
1317 | DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); | |
1318 | brcmu_pkt_buf_free_skb(p0); | |
1319 | di->dma.txavail = 0; | |
1320 | di->dma.txnobuf++; | |
1321 | return -1; | |
1322 | } | |
1323 | ||
1324 | /* | |
1325 | * Reclaim next completed txd (txds if using chained buffers) in the range | |
1326 | * specified and return associated packet. | |
1327 | * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1328 | * transmitted as noted by the hardware "CurrDescr" pointer.
1329 | * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
1330 | * transferred by the DMA as noted by the hardware "ActiveDescr" pointer. | |
1331 | * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and | |
1332 | * return associated packet regardless of the value of hardware pointers. | |
1333 | */ | |
1334 | struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) | |
1335 | { | |
1336 | struct dma_info *di = (struct dma_info *)pub; | |
1337 | u16 start, end, i; | |
1338 | u16 active_desc; | |
1339 | struct sk_buff *txp; | |
1340 | ||
1341 | DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, | |
1342 | (range == DMA_RANGE_ALL) ? "all" : | |
1343 | ((range == | |
1344 | DMA_RANGE_TRANSMITTED) ? "transmitted" : | |
1345 | "transferred"))); | |
1346 | ||
1347 | if (di->ntxd == 0) | |
1348 | return NULL; | |
1349 | ||
1350 | txp = NULL; | |
1351 | ||
1352 | start = di->txin; | |
1353 | if (range == DMA_RANGE_ALL) | |
1354 | end = di->txout; | |
1355 | else { | |
1356 | struct dma64regs __iomem *dregs = di->d64txregs; | |
1357 | ||
1358 | end = (u16) (B2I(((R_REG(&dregs->status0) & | |
1359 | D64_XS0_CD_MASK) - | |
1360 | di->xmtptrbase) & D64_XS0_CD_MASK, | |
1361 | struct dma64desc)); | |
1362 | ||
1363 | if (range == DMA_RANGE_TRANSFERED) { | |
1364 | active_desc = | |
1365 | (u16) (R_REG(&dregs->status1) & | |
1366 | D64_XS1_AD_MASK); | |
1367 | active_desc = | |
1368 | (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; | |
1369 | active_desc = B2I(active_desc, struct dma64desc); | |
1370 | if (end != active_desc) | |
1371 | end = prevtxd(di, active_desc); | |
1372 | } | |
1373 | } | |
1374 | ||
1375 | if ((start == 0) && (end > di->txout)) | |
1376 | goto bogus; | |
1377 | ||
1378 | for (i = start; i != end && !txp; i = nexttxd(di, i)) { | |
1379 | dma_addr_t pa; | |
1380 | uint size; | |
1381 | ||
1382 | pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow; | |
1383 | ||
1384 | size = | |
1385 | (le32_to_cpu(di->txd64[i].ctrl2) & | |
1386 | D64_CTRL2_BC_MASK); | |
1387 | ||
1388 | di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef); | |
1389 | di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef); | |
1390 | ||
1391 | txp = di->txp[i]; | |
1392 | di->txp[i] = NULL; | |
1393 | ||
1394 | pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE); | |
1395 | } | |
1396 | ||
1397 | di->txin = i; | |
1398 | ||
1399 | /* tx flow control */ | |
1400 | di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; | |
1401 | ||
1402 | return txp; | |
1403 | ||
1404 | bogus: | |
1405 | DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d " | |
1406 | "force %d\n", start, end, di->txout, forceall)); | |
1407 | return NULL; | |
1408 | } | |
1409 | ||
1410 | /* | |
1411 | * Mac80211 initiated actions sometimes require packets in the DMA queue to be | |
1412 | * modified. The modified portion of the packet is not under control of the DMA | |
1413 | * engine. This function calls a caller-supplied function for each packet in | |
1414 | * the caller-specified dma chain.
1415 | */ | |
1416 | void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc) | |
1417 | (void *pkt, void *arg_a), void *arg_a) | |
1418 | { | |
1419 | struct dma_info *di = (struct dma_info *) dmah; | |
1420 | uint i = di->txin; | |
1421 | uint end = di->txout; | |
1422 | struct sk_buff *skb; | |
1423 | struct ieee80211_tx_info *tx_info; | |
1424 | ||
1425 | while (i != end) { | |
1426 | skb = (struct sk_buff *)di->txp[i]; | |
1427 | if (skb != NULL) { | |
1428 | tx_info = (struct ieee80211_tx_info *)skb->cb; | |
1429 | (callback_fnc)(tx_info, arg_a); | |
1430 | } | |
1431 | i = nexttxd(di, i); | |
1432 | } | |
1433 | } |