/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "dma.h"
#include "soc.h"

/*
 * DMA hardware requires each descriptor ring to be 8 kB aligned and to fit
 * within a single contiguous 8 kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))

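/* Example: sizeof(struct dma64desc) is 16 bytes, so an 8 kB ring holds
 * at most D64MAXDD = 8192 / 16 = 512 descriptors.
 */
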
/* transmit channel control */
#define D64_XC_XE		0x00000001	/* transmit enable */
#define D64_XC_SE		0x00000002	/* transmit suspend request */
#define D64_XC_LE		0x00000004	/* loopback enable */
#define D64_XC_FL		0x00000010	/* flush request */
#define D64_XC_PD		0x00000800	/* parity check disable */
#define D64_XC_AE		0x00030000	/* address extension bits */
#define D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */

/* receive channel control */
/* receive enable */
#define D64_RC_RE		0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK		0x000000fe
#define D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH		0x00000200
/* overflow continue */
#define D64_RC_OC		0x00000400
/* parity check disable */
#define D64_RC_PD		0x00000800
/* address extension bits */
#define D64_RC_AE		0x00030000
#define D64_RC_AE_SHIFT		16

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* unframed rx/tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define D64_FA_OFF_MASK		0xffff	/* offset */
#define D64_FA_SEL_MASK		0xf0000	/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count; the real data length must be <= 16 KB */
#define D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define D64_CTRL2_AE		0x00030000
#define D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK	0x0ff00000

#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/*
 * Packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM 172

/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(fmt, ...)					\
do {								\
	if (*di->msg_level & 1)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#define DMA_TRACE(fmt, ...)					\
do {								\
	if (*di->msg_level & 2)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#else
#define DMA_ERROR(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#define DMA_TRACE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#endif				/* BCMDBG */

#define DMA_NONE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)

#define MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))

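/* Example: each struct dma64desc is 16 bytes, so B2I(0x40, struct dma64desc)
 * is descriptor index 4 and I2B(4, struct dma64desc) is byte offset 0x40.
 */
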
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */

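/*
 * These masks implement "address extension": when a DMA address has bits set
 * above what a register or descriptor field can hold directly, the top bits
 * are relocated into the AE field of ctrl2 or of the control register (see
 * dma64_dd_upd() and _dma_ddtable_init()).
 */
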
/*
 * DMA descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits & bufcount */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};

/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct pci_dev *pbus;	/* bus handle */

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	struct dma64regs __iomem *d64txregs;
	/* 64-bit dma rx engine registers */
	struct dma64regs __iomem *d64rxregs;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;	/* # tx descriptors tunable */
	u16 txin;	/* index of next descriptor to reclaim */
	u16 txout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist the
				 * upper stack, e.g. some rx pkt buffers will
				 * be bridged to the tx side without byte
				 * copying. The extra headroom needs to be
				 * large enough to fit txheader needs. Some
				 * dongle drivers may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsetlow;
	/* high 32 bits */
	uint ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsetlow;
	/* high 32 bits */
	uint dataoffsethigh;
	/* is the descriptor base required to be 4 kB/8 kB aligned? */
	bool aligndesc_4k;
};

/*
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
 */
static uint dma_msg_level;

/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

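/* Example: parity32 of 0x7 (three set bits) folds to 1, while 0x3 (two set
 * bits) folds to 0.
 */
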
static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}

/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1);	/* faster than %, but n must be power of 2 */
}

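/* Example: with n = 512, xxd(513, 512) == 513 & 511 == 1, i.e. 513 % 512
 * without the cost of a division.
 */
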
static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}

static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}

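/*
 * ntxdactive()/nrxdactive() give the ring occupancy between head h and
 * tail t. The unsigned difference t - h is masked to the power-of-two ring
 * size, so the result stays correct after t wraps around and is numerically
 * smaller than h.
 */
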
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR("NULL dma handle\n");
		return 0;
	}

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control, control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control, control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}

static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
{
	u32 w;

	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs))
			DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
				  di->name);
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs))
			DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
				  di->name);
		return true;
	}

	return false;
}

static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}

/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, pap);
}

static
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;

	while (size >>= 1)
		bitpos++;
	return bitpos;
}

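/* Example: dma_align_sizetobits(8192) == 13; in general the result is
 * floor(log2(size)).
 */
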
/*
 * This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is redone at a location aligned
 * to the descriptor ring size, which guarantees the ring cannot cross a
 * page boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}

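/*
 * Note: ring sizes here are powers of two in practice (descriptor counts are
 * powers of two, as the index math in xxd() already assumes, and descriptors
 * are 16 bytes), so a ring re-allocated at its own size alignment cannot
 * straddle a boundary of that same size.
 */
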
static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
		    roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   void __iomem *dmaregstx, void __iomem *dmaregsrx,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
{
	struct dma_info *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointers */
	di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
	di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
		  name, "DMA64",
		  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		  rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map the silicon backplane address to zero-based
	 *     memory, need offset
	 *     Other bus: use zero. SI_BUS BIGENDIAN kludge: use the sdram
	 *     swapped region for data buffers, not descriptors
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* add offset for pcie with DMA64 bus */
	di->ddoffsetlow = 0;
	di->ddoffsethigh = SI_PCIE_DMA_H32;
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;
	/* WAR64450: DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for a smaller dd table, HW relaxes the alignment
			 * requirement
			 */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	DMA_NONE("DMA descriptor align_needed %d, align %d\n",
		 di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		  di->ddoffsetlow, di->ddoffsethigh,
		  di->dataoffsetlow, di->dataoffsethigh,
		  di->addrext);

	return (struct dma_pub *) di;

fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with a big (>1 GB) physical address needs address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			    cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}

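/*
 * Example of the address-extension path above: on a 32-bit PCI bus with
 * pa == 0xc0001000, bits [31:30] (0x3) move into the AE field of ctrl2 and
 * addrlow is programmed with 0x00001000 + dataoffsetlow.
 */
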
/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	DMA_TRACE("%s:\n", di->name);

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* A DMA engine without an alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

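/*
 * Note on the 'curr' computation above: the CD field of status0 reports the
 * current descriptor as a byte offset relative to the ring base register, so
 * the code subtracts rcvptrbase, re-masks to stay within the field width, and
 * converts bytes to a descriptor index with B2I().
 */
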
static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !! rx entry routine
 * returns the number of packets in the next frame, or 0 if there are none
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 * supported with a pkt chain
 * otherwise, the frame is treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer's data. After it reaches the max buffer size, the data continues
 * in the next DMA descriptor buffer WITHOUT a DMA header
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				struct dma64desc);
			DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
				  di->rxin, di->rxout, cur);
		}
#endif				/* BCMDBG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR("%s: bad frame length (%d)\n",
				  di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}

static bool dma64_rxidle(struct dma_info *di)
{
	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}

/*
 * post receive buffers
 * Returns true if the refill failed completely and the ring is empty; this
 * will stall the rx dma, and the caller might want to call rxfill again asap.
 * This rarely happens on a memory-rich NIC, but often on memory-constrained
 * dongles.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	DMA_TRACE("%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR("%s: out of rxbufs\n", di->name);
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR("%s: ring is empty !\n", di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve the extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}

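/*
 * The "*(u32 *) (p->data) = 0" pre-clear in dma_rxfill() pairs with dma_rx():
 * the receive path spins via dma_spin_for_len() until the engine has
 * overwritten that zeroed word with the real frame length.
 */
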
void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}

void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}

/* get the address of a variable so the caller can change it later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* A DMA engine without an alignment requirement requires the table
	 * to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* A DMA engine with an alignment requirement requires the table to
	 * be initialized after enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

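/*
 * txavail starts at ntxd - 1 because one descriptor is deliberately kept
 * unused: if all ntxd entries could be filled, txin == txout would be
 * ambiguous between an empty and a full ring (see the out-of-txd test in
 * dma_txfast()).
 */
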
void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}

void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}

bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	       ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}

void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}

bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * Errors (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems.
 */
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	DMA_TRACE("%s:\n", di->name);

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = p->data;
		len = p->len;
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (nexttxd(di, txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		flags = 0;
		if (p == p0)
			flags |= D64_CTRL1_SOF;

		/* With a DMA segment list, the descriptor table is filled
		 * using the segment list instead of looping over
		 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
		 * is when the end of the segment list is reached.
		 */
		if (next == NULL)
			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
		if (txout == (di->ntxd - 1))
			flags |= D64_CTRL1_EOT;

		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

		txout = nexttxd(di, txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		di->txd64[prevtxd(di, txout)].ctrl1 =
		    cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, struct dma64desc));

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return 0;

outoftxd:
	DMA_ERROR("%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p0);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}

/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		struct dma64regs __iomem *dregs = di->d64txregs;

		end = (u16) (B2I(((R_REG(&dregs->status0) &
				   D64_XS0_CD_MASK) -
				  di->xmtptrbase) & D64_XS0_CD_MASK,
				 struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return txp;

bogus:
	DMA_NONE("bogus curr: start %d end %d txout %d\n",
		 start, end, di->txout);
	return NULL;
}

/*
 * mac80211-initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the
 * DMA engine. This function calls a caller-supplied function for each packet
 * in the caller-specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = (struct dma_info *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = (struct sk_buff *)di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}