Commit | Line | Data |
---|---|---|
5b435de0 AS |
1 | /* |
2 | * Copyright (c) 2010 Broadcom Corporation | |
3 | * | |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
11 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION | |
13 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | |
14 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
8505a7e6 JP |
16 | |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
18 | ||
5b435de0 | 19 | #include <linux/slab.h> |
5b435de0 AS |
20 | #include <linux/delay.h> |
21 | #include <linux/pci.h> | |
22 | ||
23 | #include <brcmu_utils.h> | |
24 | #include <aiutils.h> | |
25 | #include "types.h" | |
26 | #include "dma.h" | |
23038214 | 27 | #include "soc.h" |
5b435de0 AS |
28 | |
29 | /* | |
30 | * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within | |
31 | * a contiguous 8kB physical address range. | |
32 | */ | |
33 | #define D64RINGALIGN_BITS 13 | |
34 | #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) | |
35 | #define D64RINGALIGN (1 << D64RINGALIGN_BITS) | |
36 | ||
37 | #define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc)) | |
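/*
 * A struct dma64desc (defined below) is four 32-bit words, i.e. 16 bytes, so
 * an 8 kB ring holds at most D64MAXDD = 8192 / 16 = 512 descriptors.
 */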
38 | ||
39 | /* transmit channel control */ | |
40 | #define D64_XC_XE 0x00000001 /* transmit enable */ | |
41 | #define D64_XC_SE 0x00000002 /* transmit suspend request */ | |
42 | #define D64_XC_LE 0x00000004 /* loopback enable */ | |
43 | #define D64_XC_FL 0x00000010 /* flush request */ | |
44 | #define D64_XC_PD 0x00000800 /* parity check disable */ | |
45 | #define D64_XC_AE 0x00030000 /* address extension bits */ | |
46 | #define D64_XC_AE_SHIFT 16 | |
47 | ||
48 | /* transmit descriptor table pointer */ | |
49 | #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */ | |
50 | ||
51 | /* transmit channel status */ | |
52 | #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */ | |
53 | #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */ | |
54 | #define D64_XS0_XS_SHIFT 28 | |
55 | #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */ | |
56 | #define D64_XS0_XS_ACTIVE 0x10000000 /* active */ | |
57 | #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */ | |
58 | #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */ | |
59 | #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */ | |
60 | ||
61 | #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */ | |
62 | #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */ | |
63 | #define D64_XS1_XE_SHIFT 28 | |
64 | #define D64_XS1_XE_NOERR 0x00000000 /* no error */ | |
65 | #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */ | |
66 | #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */ | |
67 | #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */ | |
68 | #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */ | |
69 | #define D64_XS1_XE_COREE 0x50000000 /* core error */ | |
70 | ||
71 | /* receive channel control */ | |
72 | /* receive enable */ | |
73 | #define D64_RC_RE 0x00000001 | |
74 | /* receive frame offset */ | |
75 | #define D64_RC_RO_MASK 0x000000fe | |
76 | #define D64_RC_RO_SHIFT 1 | |
77 | /* direct fifo receive (pio) mode */ | |
78 | #define D64_RC_FM 0x00000100 | |
79 | /* separate rx header descriptor enable */ | |
80 | #define D64_RC_SH 0x00000200 | |
81 | /* overflow continue */ | |
82 | #define D64_RC_OC 0x00000400 | |
83 | /* parity check disable */ | |
84 | #define D64_RC_PD 0x00000800 | |
85 | /* address extension bits */ | |
86 | #define D64_RC_AE 0x00030000 | |
87 | #define D64_RC_AE_SHIFT 16 | |
88 | ||
89 | /* flags for dma controller */ | |
90 | /* parity enable */ | |
91 | #define DMA_CTRL_PEN (1 << 0) | |
92 | /* rx overflow continue */ | |
93 | #define DMA_CTRL_ROC (1 << 1) | |
94 | /* allow rx scatter to multiple descriptors */ | |
95 | #define DMA_CTRL_RXMULTI (1 << 2) | |
96 | /* Unframed Rx/Tx data */ | |
97 | #define DMA_CTRL_UNFRAMED (1 << 3) | |
98 | ||
99 | /* receive descriptor table pointer */ | |
100 | #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */ | |
101 | ||
102 | /* receive channel status */ | |
103 | #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */ | |
104 | #define D64_RS0_RS_MASK 0xf0000000 /* receive state */ | |
105 | #define D64_RS0_RS_SHIFT 28 | |
106 | #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */ | |
107 | #define D64_RS0_RS_ACTIVE 0x10000000 /* active */ | |
108 | #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */ | |
109 | #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */ | |
110 | #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */ | |
111 | ||
112 | #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */ | |
113 | #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ | |
114 | #define D64_RS1_RE_SHIFT 28 | |
115 | #define D64_RS1_RE_NOERR 0x00000000 /* no error */ | |
116 | #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */ | |
117 | #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */ | |
118 | #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */ | |
119 | #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */ | |
120 | #define D64_RS1_RE_COREE 0x50000000 /* core error */ | |
121 | ||
122 | /* fifoaddr */ | |
123 | #define D64_FA_OFF_MASK 0xffff /* offset */ | |
124 | #define D64_FA_SEL_MASK 0xf0000 /* select */ | |
125 | #define D64_FA_SEL_SHIFT 16 | |
126 | #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */ | |
127 | #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */ | |
128 | #define D64_FA_SEL_RDD 0x40000 /* receive dma data */ | |
129 | #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */ | |
130 | #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */ | |
131 | #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */ | |
132 | #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */ | |
133 | #define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */ | |
134 | #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */ | |
135 | #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */ | |
136 | ||
137 | /* descriptor control flags 1 */ | |
138 | #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */ | |
139 | #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */ | |
140 | #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */ | |
141 | #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */ | |
142 | #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */ | |
143 | ||
144 | /* descriptor control flags 2 */ | |
145 | /* buffer byte count. real data len must be <= 16KB */ | |
146 | #define D64_CTRL2_BC_MASK 0x00007fff | |
147 | /* address extension bits */ | |
148 | #define D64_CTRL2_AE 0x00030000 | |
149 | #define D64_CTRL2_AE_SHIFT 16 | |
150 | /* parity bit */ | |
151 | #define D64_CTRL2_PARITY 0x00040000 | |
152 | ||
153 | /* control flags in the range [27:20] are core-specific and not defined here */ | |
154 | #define D64_CTRL_CORE_MASK 0x0ff00000 | |
155 | ||
156 | #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */ | |
157 | #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */ | |
158 | #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */ | |
159 | #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */ | |
160 | ||
161 | /* | |
162 | * packet headroom necessary to accommodate the largest header | |
163 | * in the system (i.e. TXOFF). By doing so, we avoid the need to | |
164 | * allocate an extra buffer for the header when bridging to WL. | |
165 | * There is a compile time check in wlc.c which ensures that this | |
166 | * value is at least as big as TXOFF. This value is used in | |
167 | * dma_rxfill(). | |
168 | */ | |
169 | ||
170 | #define BCMEXTRAHDROOM 172 | |
171 | ||
172 | /* debug/trace */ | |
173 | #ifdef BCMDBG | |
8505a7e6 JP |
174 | #define DMA_ERROR(fmt, ...) \ |
175 | do { \ | |
176 | if (*di->msg_level & 1) \ | |
177 | pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ | |
178 | } while (0) | |
179 | #define DMA_TRACE(fmt, ...) \ | |
180 | do { \ | |
181 | if (*di->msg_level & 2) \ | |
182 | pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ | |
183 | } while (0) | |
5b435de0 | 184 | #else |
8505a7e6 JP |
185 | #define DMA_ERROR(fmt, ...) \ |
186 | no_printk(fmt, ##__VA_ARGS__) | |
187 | #define DMA_TRACE(fmt, ...) \ | |
188 | no_printk(fmt, ##__VA_ARGS__) | |
5b435de0 AS |
189 | #endif /* BCMDBG */ |
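/*
 * With BCMDBG, bit 0 of *di->msg_level enables DMA_ERROR output and bit 1
 * enables DMA_TRACE output; without BCMDBG both macros compile down to
 * no_printk(), so the format strings are still type-checked but nothing is
 * emitted.
 */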
190 | ||
8505a7e6 JP |
191 | #define DMA_NONE(fmt, ...) \ |
192 | no_printk(fmt, ##__VA_ARGS__) | |
5b435de0 AS |
193 | |
194 | #define MAXNAMEL 8 /* 8 char names */ | |
195 | ||
196 | /* macros to convert between byte offsets and indexes */ | |
197 | #define B2I(bytes, type) ((bytes) / sizeof(type)) | |
198 | #define I2B(index, type) ((index) * sizeof(type)) | |
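/*
 * Example: with struct dma64desc (16 bytes), B2I(0x20, struct dma64desc) == 2
 * and I2B(2, struct dma64desc) == 0x20. These macros translate between the
 * byte offsets kept in the hardware pointer/status registers and ring indexes.
 */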
199 | ||
200 | #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */ | |
201 | #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */ | |
202 | ||
203 | #define PCI64ADDR_HIGH 0x80000000 /* address[63] */ | |
204 | #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */ | |
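/*
 * PCI32ADDR_HIGH masks address bits 31:30. When a buffer lies at or above the
 * first 1 GB window, those two bits are moved into the descriptor AE (address
 * extension) field (see dma64_dd_upd) or the channel control AE field (see
 * _dma_ddtable_init) and cleared from the 32-bit address written to addrlow.
 */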
205 | ||
206 | /* | |
207 | * DMA Descriptor | |
208 | * Descriptors are only read by the hardware, never written back. | |
209 | */ | |
210 | struct dma64desc { | |
211 | __le32 ctrl1; /* misc control bits & bufcount */ | |
212 | __le32 ctrl2; /* buffer count and address extension */ | |
213 | __le32 addrlow; /* memory address of the data buffer, bits 31:0 */ | |
214 | __le32 addrhigh; /* memory address of the data buffer, bits 63:32 */ | |
215 | }; | |
216 | ||
217 | /* dma engine software state */ | |
218 | struct dma_info { | |
219 | struct dma_pub dma; /* exported structure */ | |
220 | uint *msg_level; /* message level pointer */ | |
221 | char name[MAXNAMEL]; /* callers name for diag msgs */ | |
222 | ||
2e81b9b1 AS |
223 | struct bcma_device *d11core; |
224 | struct device *dmadev; | |
5b435de0 AS |
225 | |
226 | bool dma64; /* this dma engine is operating in 64-bit mode */ | |
227 | bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ | |
228 | ||
229 | /* 64-bit dma tx engine registers */ | |
230 | struct dma64regs __iomem *d64txregs; | |
231 | /* 64-bit dma rx engine registers */ | |
232 | struct dma64regs __iomem *d64rxregs; | |
233 | /* pointer to dma64 tx descriptor ring */ | |
234 | struct dma64desc *txd64; | |
235 | /* pointer to dma64 rx descriptor ring */ | |
236 | struct dma64desc *rxd64; | |
237 | ||
238 | u16 dmadesc_align; /* alignment requirement for dma descriptors */ | |
239 | ||
240 | u16 ntxd; /* # tx descriptors tunable */ | |
241 | u16 txin; /* index of next descriptor to reclaim */ | |
242 | u16 txout; /* index of next descriptor to post */ | |
243 | /* pointer to parallel array of pointers to packets */ | |
244 | struct sk_buff **txp; | |
245 | /* Aligned physical address of descriptor ring */ | |
246 | dma_addr_t txdpa; | |
247 | /* Original physical address of descriptor ring */ | |
248 | dma_addr_t txdpaorig; | |
249 | u16 txdalign; /* #bytes added to alloc'd mem to align txd */ | |
250 | u32 txdalloc; /* #bytes allocated for the ring */ | |
251 | u32 xmtptrbase; /* When using unaligned descriptors, the ptr register | |
252 | * is not just an index, it needs all 13 bits to be | |
253 | * an offset from the addr register. | |
254 | */ | |
255 | ||
256 | u16 nrxd; /* # rx descriptors tunable */ | |
257 | u16 rxin; /* index of next descriptor to reclaim */ | |
258 | u16 rxout; /* index of next descriptor to post */ | |
259 | /* pointer to parallel array of pointers to packets */ | |
260 | struct sk_buff **rxp; | |
261 | /* Aligned physical address of descriptor ring */ | |
262 | dma_addr_t rxdpa; | |
263 | /* Original physical address of descriptor ring */ | |
264 | dma_addr_t rxdpaorig; | |
265 | u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */ | |
266 | u32 rxdalloc; /* #bytes allocated for the ring */ | |
267 | u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */ | |
268 | ||
269 | /* tunables */ | |
270 | unsigned int rxbufsize; /* rx buffer size in bytes, not including | |
271 | * the extra headroom | |
272 | */ | |
273 | uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper | |
274 | * stack, e.g. some rx pkt buffers will be | |
275 | * bridged to tx side without byte copying. | |
276 | * The extra headroom needs to be large enough | |
277 | * to fit txheader needs. Some dongle driver may | |
278 | * not need it. | |
279 | */ | |
280 | uint nrxpost; /* # rx buffers to keep posted */ | |
281 | unsigned int rxoffset; /* rxcontrol offset */ | |
282 | /* add to get dma address of descriptor ring, low 32 bits */ | |
283 | uint ddoffsetlow; | |
284 | /* high 32 bits */ | |
285 | uint ddoffsethigh; | |
286 | /* add to get dma address of data buffer, low 32 bits */ | |
287 | uint dataoffsetlow; | |
288 | /* high 32 bits */ | |
289 | uint dataoffsethigh; | |
290 | /* whether the descriptor base needs to be aligned */ | |
291 | bool aligndesc_4k; | |
292 | }; | |
293 | ||
294 | /* | |
295 | * default dma message level (if input msg_level | |
296 | * pointer is null in dma_attach()) | |
297 | */ | |
298 | static uint dma_msg_level; | |
299 | ||
300 | /* Check for odd number of 1's */ | |
301 | static u32 parity32(__le32 data) | |
302 | { | |
303 | /* no swap needed for counting 1's */ | |
304 | u32 par_data = *(u32 *)&data; | |
305 | ||
306 | par_data ^= par_data >> 16; | |
307 | par_data ^= par_data >> 8; | |
308 | par_data ^= par_data >> 4; | |
309 | par_data ^= par_data >> 2; | |
310 | par_data ^= par_data >> 1; | |
311 | ||
312 | return par_data & 1; | |
313 | } | |
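/*
 * parity32() folds the word onto itself (16, 8, 4, 2, 1 bits) so that bit 0
 * ends up being the XOR of all 32 input bits. E.g. 0x00000003 (two set bits)
 * yields 0, while 0x00000007 (three set bits) yields 1.
 */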
314 | ||
315 | static bool dma64_dd_parity(struct dma64desc *dd) | |
316 | { | |
317 | return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2); | |
318 | } | |
319 | ||
320 | /* descriptor bumping functions */ | |
321 | ||
322 | static uint xxd(uint x, uint n) | |
323 | { | |
324 | return x & (n - 1); /* faster than %, but n must be power of 2 */ | |
325 | } | |
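/*
 * Ring indexes wrap with a bitwise AND, e.g. for ntxd == 256,
 * xxd(256, 256) == 0 and xxd(257, 256) == 1. This is why the descriptor
 * counts handed to dma_attach() must be powers of 2.
 */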
326 | ||
327 | static uint txd(struct dma_info *di, uint x) | |
328 | { | |
329 | return xxd(x, di->ntxd); | |
330 | } | |
331 | ||
332 | static uint rxd(struct dma_info *di, uint x) | |
333 | { | |
334 | return xxd(x, di->nrxd); | |
335 | } | |
336 | ||
337 | static uint nexttxd(struct dma_info *di, uint i) | |
338 | { | |
339 | return txd(di, i + 1); | |
340 | } | |
341 | ||
342 | static uint prevtxd(struct dma_info *di, uint i) | |
343 | { | |
344 | return txd(di, i - 1); | |
345 | } | |
346 | ||
347 | static uint nextrxd(struct dma_info *di, uint i) | |
348 | { | |
349 | return rxd(di, i + 1); | |
350 | } | |
351 | ||
352 | static uint ntxdactive(struct dma_info *di, uint h, uint t) | |
353 | { | |
354 | return txd(di, t-h); | |
355 | } | |
356 | ||
357 | static uint nrxdactive(struct dma_info *di, uint h, uint t) | |
358 | { | |
359 | return rxd(di, t-h); | |
360 | } | |
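/*
 * ntxdactive()/nrxdactive() return the number of descriptors between the
 * reclaim index (head) and the post index (tail), modulo the ring size. The
 * free count exposed through dma.txavail is therefore
 * ntxd - ntxdactive(di, txin, txout) - 1; one slot is kept unused so that a
 * full ring can be told apart from an empty one (txin == txout means empty).
 */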
361 | ||
362 | static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) | |
363 | { | |
ae8e4672 | 364 | uint dmactrlflags; |
5b435de0 AS |
365 | |
366 | if (di == NULL) { | |
8505a7e6 | 367 | DMA_ERROR("NULL dma handle\n"); |
5b435de0 AS |
368 | return 0; |
369 | } | |
370 | ||
ae8e4672 | 371 | dmactrlflags = di->dma.dmactrlflags; |
5b435de0 AS |
372 | dmactrlflags &= ~mask; |
373 | dmactrlflags |= flags; | |
374 | ||
375 | /* If trying to enable parity, check if parity is actually supported */ | |
376 | if (dmactrlflags & DMA_CTRL_PEN) { | |
377 | u32 control; | |
378 | ||
379 | control = R_REG(&di->d64txregs->control); | |
380 | W_REG(&di->d64txregs->control, | |
381 | control | D64_XC_PD); | |
382 | if (R_REG(&di->d64txregs->control) & D64_XC_PD) | |
383 | /* We *can* disable it so it is supported, | |
384 | * restore control register | |
385 | */ | |
386 | W_REG(&di->d64txregs->control, | |
387 | control); | |
388 | else | |
389 | /* Not supported, don't allow it to be enabled */ | |
390 | dmactrlflags &= ~DMA_CTRL_PEN; | |
391 | } | |
392 | ||
393 | di->dma.dmactrlflags = dmactrlflags; | |
394 | ||
395 | return dmactrlflags; | |
396 | } | |
397 | ||
398 | static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) | |
399 | { | |
400 | u32 w; | |
401 | OR_REG(&dma64regs->control, D64_XC_AE); | |
402 | w = R_REG(&dma64regs->control); | |
403 | AND_REG(&dma64regs->control, ~D64_XC_AE); | |
404 | return (w & D64_XC_AE) == D64_XC_AE; | |
405 | } | |
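/*
 * This probe sets the AE bits in the control register and reads them back:
 * if they stick, the engine implements DmaExtendedAddrChanges. AE is then
 * cleared again so the probe has no lasting effect.
 */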
406 | ||
407 | /* | |
408 | * return true if this dma engine supports DmaExtendedAddrChanges, | |
409 | * otherwise false | |
410 | */ | |
411 | static bool _dma_isaddrext(struct dma_info *di) | |
412 | { | |
413 | /* DMA64 supports full 32- or 64-bit operation. AE is always valid */ | |
414 | ||
415 | /* not all tx or rx channels are available */ | |
416 | if (di->d64txregs != NULL) { | |
417 | if (!_dma64_addrext(di->d64txregs)) | |
8505a7e6 JP |
418 | DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", |
419 | di->name); | |
5b435de0 AS |
420 | return true; |
421 | } else if (di->d64rxregs != NULL) { | |
422 | if (!_dma64_addrext(di->d64rxregs)) | |
8505a7e6 JP |
423 | DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", |
424 | di->name); | |
5b435de0 AS |
425 | return true; |
426 | } | |
427 | ||
428 | return false; | |
429 | } | |
430 | ||
431 | static bool _dma_descriptor_align(struct dma_info *di) | |
432 | { | |
433 | u32 addrl; | |
434 | ||
435 | /* Check to see if the descriptors need to be aligned on 4K/8K or not */ | |
436 | if (di->d64txregs != NULL) { | |
437 | W_REG(&di->d64txregs->addrlow, 0xff0); | |
438 | addrl = R_REG(&di->d64txregs->addrlow); | |
439 | if (addrl != 0) | |
440 | return false; | |
441 | } else if (di->d64rxregs != NULL) { | |
442 | W_REG(&di->d64rxregs->addrlow, 0xff0); | |
443 | addrl = R_REG(&di->d64rxregs->addrlow); | |
444 | if (addrl != 0) | |
445 | return false; | |
446 | } | |
447 | return true; | |
448 | } | |
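/*
 * The check above writes a value with non-zero low bits (0xff0) into addrlow.
 * If the engine forces those bits to zero on readback, descriptors must be
 * placed on the full ring alignment (aligndesc_4k); if the bits are
 * preserved, 16-byte alignment of the descriptor table is sufficient.
 */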
449 | ||
450 | /* | |
451 | * Descriptor table must start at the DMA hardware dictated alignment, so | |
452 | * allocated memory must be large enough to support this requirement. | |
453 | */ | |
2e81b9b1 | 454 | static void *dma_alloc_consistent(struct dma_info *di, uint size, |
5b435de0 AS |
455 | u16 align_bits, uint *alloced, |
456 | dma_addr_t *pap) | |
457 | { | |
458 | if (align_bits) { | |
459 | u16 align = (1 << align_bits); | |
460 | if (!IS_ALIGNED(PAGE_SIZE, align)) | |
461 | size += align; | |
462 | *alloced = size; | |
463 | } | |
2e81b9b1 | 464 | return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC); |
5b435de0 AS |
465 | } |
466 | ||
467 | static | |
468 | u8 dma_align_sizetobits(uint size) | |
469 | { | |
470 | u8 bitpos = 0; | |
471 | while (size >>= 1) | |
472 | bitpos++; | |
473 | return bitpos; | |
474 | } | |
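/*
 * dma_align_sizetobits() returns the position of the highest set bit, i.e.
 * floor(log2(size)): 8192 -> 13, 4096 -> 12. For the power-of-2 ring sizes
 * used here this is exactly the alignment, in bits, of the ring size.
 */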
475 | ||
476 | /* This function ensures that the DMA descriptor ring does not get allocated | |
477 | * across a page boundary. If the first allocation does cross a page boundary, | |
478 | * it is freed and the allocation is redone at a location aligned to the | |
479 | * descriptor ring size. This ensures that the ring does | |
480 | * not cross a page boundary | |
481 | */ | |
482 | static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, | |
483 | u16 *alignbits, uint *alloced, | |
484 | dma_addr_t *descpa) | |
485 | { | |
486 | void *va; | |
487 | u32 desc_strtaddr; | |
488 | u32 alignbytes = 1 << *alignbits; | |
489 | ||
2e81b9b1 | 490 | va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa); |
5b435de0 AS |
491 | |
492 | if (NULL == va) | |
493 | return NULL; | |
494 | ||
495 | desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes); | |
496 | if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr | |
497 | & boundary)) { | |
498 | *alignbits = dma_align_sizetobits(size); | |
2e81b9b1 AS |
499 | dma_free_coherent(di->dmadev, size, va, *descpa); |
500 | va = dma_alloc_consistent(di, size, *alignbits, | |
5b435de0 AS |
501 | alloced, descpa); |
502 | } | |
503 | return va; | |
504 | } | |
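/*
 * dma_ringalloc() first over-allocates by the requested alignment (see
 * dma_alloc_consistent) so the ring start can be rounded up. If the rounded
 * ring would still straddle the 'boundary' mask, the buffer is freed and
 * re-allocated with the alignment raised to the ring size itself, which
 * should guarantee the ring fits entirely inside one aligned region.
 */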
505 | ||
506 | static bool dma64_alloc(struct dma_info *di, uint direction) | |
507 | { | |
508 | u16 size; | |
509 | uint ddlen; | |
510 | void *va; | |
511 | uint alloced = 0; | |
512 | u16 align; | |
513 | u16 align_bits; | |
514 | ||
515 | ddlen = sizeof(struct dma64desc); | |
516 | ||
517 | size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); | |
518 | align_bits = di->dmadesc_align; | |
519 | align = (1 << align_bits); | |
520 | ||
521 | if (direction == DMA_TX) { | |
522 | va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, | |
523 | &alloced, &di->txdpaorig); | |
524 | if (va == NULL) { | |
8505a7e6 JP |
525 | DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n", |
526 | di->name); | |
5b435de0 AS |
527 | return false; |
528 | } | |
529 | align = (1 << align_bits); | |
530 | di->txd64 = (struct dma64desc *) | |
531 | roundup((unsigned long)va, align); | |
532 | di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va); | |
533 | di->txdpa = di->txdpaorig + di->txdalign; | |
534 | di->txdalloc = alloced; | |
535 | } else { | |
536 | va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, | |
537 | &alloced, &di->rxdpaorig); | |
538 | if (va == NULL) { | |
8505a7e6 JP |
539 | DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n", |
540 | di->name); | |
5b435de0 AS |
541 | return false; |
542 | } | |
543 | align = (1 << align_bits); | |
544 | di->rxd64 = (struct dma64desc *) | |
545 | roundup((unsigned long)va, align); | |
546 | di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va); | |
547 | di->rxdpa = di->rxdpaorig + di->rxdalign; | |
548 | di->rxdalloc = alloced; | |
549 | } | |
550 | ||
551 | return true; | |
552 | } | |
553 | ||
554 | static bool _dma_alloc(struct dma_info *di, uint direction) | |
555 | { | |
556 | return dma64_alloc(di, direction); | |
557 | } | |
558 | ||
559 | struct dma_pub *dma_attach(char *name, struct si_pub *sih, | |
2e81b9b1 AS |
560 | struct bcma_device *d11core, |
561 | void __iomem *dmaregstx, void __iomem *dmaregsrx, | |
562 | uint ntxd, uint nrxd, | |
563 | uint rxbufsize, int rxextheadroom, | |
564 | uint nrxpost, uint rxoffset, uint *msg_level) | |
5b435de0 AS |
565 | { |
566 | struct dma_info *di; | |
567 | uint size; | |
568 | ||
569 | /* allocate private info structure */ | |
570 | di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC); | |
571 | if (di == NULL) | |
572 | return NULL; | |
573 | ||
574 | di->msg_level = msg_level ? msg_level : &dma_msg_level; | |
575 | ||
576 | ||
577 | di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); | |
578 | ||
579 | /* init dma reg pointer */ | |
2e81b9b1 | 580 | di->d11core = d11core; |
5b435de0 AS |
581 | di->d64txregs = (struct dma64regs __iomem *) dmaregstx; |
582 | di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; | |
583 | ||
584 | /* | |
585 | * Default flags (which can be changed by the driver calling | |
586 | * dma_ctrlflags before enable): For backwards compatibility | |
587 | * both Rx Overflow Continue and Parity are DISABLED. | |
588 | */ | |
589 | _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); | |
590 | ||
8505a7e6 JP |
591 | DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", |
592 | name, "DMA64", | |
593 | di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, | |
594 | rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx); | |
5b435de0 AS |
595 | |
596 | /* make a private copy of our callers name */ | |
597 | strncpy(di->name, name, MAXNAMEL); | |
598 | di->name[MAXNAMEL - 1] = '\0'; | |
599 | ||
2e81b9b1 | 600 | di->dmadev = d11core->dma_dev; |
5b435de0 AS |
601 | |
602 | /* save tunables */ | |
603 | di->ntxd = (u16) ntxd; | |
604 | di->nrxd = (u16) nrxd; | |
605 | ||
606 | /* the actual dma size doesn't include the extra headroom */ | |
607 | di->rxextrahdrroom = | |
608 | (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom; | |
609 | if (rxbufsize > BCMEXTRAHDROOM) | |
610 | di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom); | |
611 | else | |
612 | di->rxbufsize = (u16) rxbufsize; | |
613 | ||
614 | di->nrxpost = (u16) nrxpost; | |
615 | di->rxoffset = (u8) rxoffset; | |
616 | ||
617 | /* | |
618 | * figure out the DMA physical address offset for dd and data | |
619 | * PCI/PCIE: they map silicon backplane addresses to zero | |
620 | * based memory, need offset | |
621 | * Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram | |
622 | * swapped region for data buffer, not descriptor | |
623 | */ | |
624 | di->ddoffsetlow = 0; | |
625 | di->dataoffsetlow = 0; | |
626 | /* add offset for pcie with DMA64 bus */ | |
627 | di->ddoffsetlow = 0; | |
628 | di->ddoffsethigh = SI_PCIE_DMA_H32; | |
629 | di->dataoffsetlow = di->ddoffsetlow; | |
630 | di->dataoffsethigh = di->ddoffsethigh; | |
631 | /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */ | |
632 | if ((ai_coreid(sih) == SDIOD_CORE_ID) | |
633 | && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) | |
634 | di->addrext = 0; | |
635 | else if ((ai_coreid(sih) == I2S_CORE_ID) && | |
636 | ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) | |
637 | di->addrext = 0; | |
638 | else | |
639 | di->addrext = _dma_isaddrext(di); | |
640 | ||
641 | /* does the descriptor need to be aligned and if yes, on 4K/8K or not */ | |
642 | di->aligndesc_4k = _dma_descriptor_align(di); | |
643 | if (di->aligndesc_4k) { | |
644 | di->dmadesc_align = D64RINGALIGN_BITS; | |
645 | if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) | |
646 | /* for a smaller dd table, HW relaxes the alignment requirement */ | |
647 | di->dmadesc_align = D64RINGALIGN_BITS - 1; | |
648 | } else { | |
649 | di->dmadesc_align = 4; /* 16 byte alignment */ | |
650 | } | |
651 | ||
8505a7e6 JP |
652 | DMA_NONE("DMA descriptor align_needed %d, align %d\n", |
653 | di->aligndesc_4k, di->dmadesc_align); | |
5b435de0 AS |
654 | |
655 | /* allocate tx packet pointer vector */ | |
656 | if (ntxd) { | |
657 | size = ntxd * sizeof(void *); | |
658 | di->txp = kzalloc(size, GFP_ATOMIC); | |
659 | if (di->txp == NULL) | |
660 | goto fail; | |
661 | } | |
662 | ||
663 | /* allocate rx packet pointer vector */ | |
664 | if (nrxd) { | |
665 | size = nrxd * sizeof(void *); | |
666 | di->rxp = kzalloc(size, GFP_ATOMIC); | |
667 | if (di->rxp == NULL) | |
668 | goto fail; | |
669 | } | |
670 | ||
671 | /* | |
672 | * allocate transmit descriptor ring, only need ntxd descriptors | |
673 | * but it must be aligned | |
674 | */ | |
675 | if (ntxd) { | |
676 | if (!_dma_alloc(di, DMA_TX)) | |
677 | goto fail; | |
678 | } | |
679 | ||
680 | /* | |
681 | * allocate receive descriptor ring, only need nrxd descriptors | |
682 | * but it must be aligned | |
683 | */ | |
684 | if (nrxd) { | |
685 | if (!_dma_alloc(di, DMA_RX)) | |
686 | goto fail; | |
687 | } | |
688 | ||
689 | if ((di->ddoffsetlow != 0) && !di->addrext) { | |
690 | if (di->txdpa > SI_PCI_DMA_SZ) { | |
8505a7e6 JP |
691 | DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n", |
692 | di->name, (u32)di->txdpa); | |
5b435de0 AS |
693 | goto fail; |
694 | } | |
695 | if (di->rxdpa > SI_PCI_DMA_SZ) { | |
8505a7e6 JP |
696 | DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n", |
697 | di->name, (u32)di->rxdpa); | |
5b435de0 AS |
698 | goto fail; |
699 | } | |
700 | } | |
701 | ||
8505a7e6 JP |
702 | DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n", |
703 | di->ddoffsetlow, di->ddoffsethigh, | |
704 | di->dataoffsetlow, di->dataoffsethigh, | |
705 | di->addrext); | |
5b435de0 AS |
706 | |
707 | return (struct dma_pub *) di; | |
708 | ||
709 | fail: | |
710 | dma_detach((struct dma_pub *)di); | |
711 | return NULL; | |
712 | } | |
713 | ||
714 | static inline void | |
715 | dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring, | |
716 | dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount) | |
717 | { | |
718 | u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK; | |
719 | ||
720 | /* PCI bus with big(>1G) physical address, use address extension */ | |
721 | if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) { | |
722 | ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow); | |
723 | ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh); | |
724 | ddring[outidx].ctrl1 = cpu_to_le32(*flags); | |
725 | ddring[outidx].ctrl2 = cpu_to_le32(ctrl2); | |
726 | } else { | |
727 | /* address extension for 32-bit PCI */ | |
728 | u32 ae; | |
729 | ||
730 | ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; | |
731 | pa &= ~PCI32ADDR_HIGH; | |
732 | ||
733 | ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE; | |
734 | ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow); | |
735 | ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh); | |
736 | ddring[outidx].ctrl1 = cpu_to_le32(*flags); | |
737 | ddring[outidx].ctrl2 = cpu_to_le32(ctrl2); | |
738 | } | |
739 | if (di->dma.dmactrlflags & DMA_CTRL_PEN) { | |
740 | if (dma64_dd_parity(&ddring[outidx])) | |
741 | ddring[outidx].ctrl2 = | |
742 | cpu_to_le32(ctrl2 | D64_CTRL2_PARITY); | |
743 | } | |
744 | } | |
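/*
 * Worked example for the address-extension path in dma64_dd_upd(): for
 * pa == 0xC0001000, ae == 0x3 and pa becomes 0x00001000; the two high bits
 * land in ctrl2 bits 17:16 (D64_CTRL2_AE) while addrlow gets the cleared
 * address plus dataoffsetlow. If parity generation is enabled and the four
 * descriptor words XOR to odd parity, D64_CTRL2_PARITY is set so the overall
 * parity comes out even.
 */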
745 | ||
746 | /* !! may be called with core in reset */ | |
747 | void dma_detach(struct dma_pub *pub) | |
748 | { | |
749 | struct dma_info *di = (struct dma_info *)pub; | |
750 | ||
8505a7e6 | 751 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
752 | |
753 | /* free dma descriptor rings */ | |
754 | if (di->txd64) | |
2e81b9b1 AS |
755 | dma_free_coherent(di->dmadev, di->txdalloc, |
756 | ((s8 *)di->txd64 - di->txdalign), | |
757 | (di->txdpaorig)); | |
5b435de0 | 758 | if (di->rxd64) |
2e81b9b1 AS |
759 | dma_free_coherent(di->dmadev, di->rxdalloc, |
760 | ((s8 *)di->rxd64 - di->rxdalign), | |
761 | (di->rxdpaorig)); | |
5b435de0 AS |
762 | |
763 | /* free packet pointer vectors */ | |
764 | kfree(di->txp); | |
765 | kfree(di->rxp); | |
766 | ||
767 | /* free our private info structure */ | |
768 | kfree(di); | |
769 | ||
770 | } | |
771 | ||
772 | /* initialize descriptor table base address */ | |
773 | static void | |
774 | _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) | |
775 | { | |
776 | if (!di->aligndesc_4k) { | |
777 | if (direction == DMA_TX) | |
778 | di->xmtptrbase = pa; | |
779 | else | |
780 | di->rcvptrbase = pa; | |
781 | } | |
782 | ||
783 | if ((di->ddoffsetlow == 0) | |
784 | || !(pa & PCI32ADDR_HIGH)) { | |
785 | if (direction == DMA_TX) { | |
786 | W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); | |
787 | W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); | |
788 | } else { | |
789 | W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); | |
790 | W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); | |
791 | } | |
792 | } else { | |
793 | /* DMA64 32bits address extension */ | |
794 | u32 ae; | |
795 | ||
796 | /* shift the high bit(s) from pa to ae */ | |
797 | ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; | |
798 | pa &= ~PCI32ADDR_HIGH; | |
799 | ||
800 | if (direction == DMA_TX) { | |
801 | W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); | |
802 | W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); | |
803 | SET_REG(&di->d64txregs->control, | |
804 | D64_XC_AE, (ae << D64_XC_AE_SHIFT)); | |
805 | } else { | |
806 | W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); | |
807 | W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); | |
808 | SET_REG(&di->d64rxregs->control, | |
809 | D64_RC_AE, (ae << D64_RC_AE_SHIFT)); | |
810 | } | |
811 | } | |
812 | } | |
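/*
 * When the descriptor ring is not ring-aligned (aligndesc_4k == false), the
 * hardware ptr/status registers hold 13-bit byte offsets relative to addrlow
 * rather than plain indexes, so the base programmed here is remembered in
 * xmtptrbase/rcvptrbase and subtracted again in dma64_getnextrxp() and
 * dma_getnexttxp() before converting to a ring index with B2I().
 */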
813 | ||
814 | static void _dma_rxenable(struct dma_info *di) | |
815 | { | |
816 | uint dmactrlflags = di->dma.dmactrlflags; | |
817 | u32 control; | |
818 | ||
8505a7e6 | 819 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
820 | |
821 | control = | |
822 | (R_REG(&di->d64rxregs->control) & D64_RC_AE) | | |
823 | D64_RC_RE; | |
824 | ||
825 | if ((dmactrlflags & DMA_CTRL_PEN) == 0) | |
826 | control |= D64_RC_PD; | |
827 | ||
828 | if (dmactrlflags & DMA_CTRL_ROC) | |
829 | control |= D64_RC_OC; | |
830 | ||
831 | W_REG(&di->d64rxregs->control, | |
832 | ((di->rxoffset << D64_RC_RO_SHIFT) | control)); | |
833 | } | |
834 | ||
835 | void dma_rxinit(struct dma_pub *pub) | |
836 | { | |
837 | struct dma_info *di = (struct dma_info *)pub; | |
838 | ||
8505a7e6 | 839 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
840 | |
841 | if (di->nrxd == 0) | |
842 | return; | |
843 | ||
844 | di->rxin = di->rxout = 0; | |
845 | ||
846 | /* clear rx descriptor ring */ | |
847 | memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc)); | |
848 | ||
849 | /* A DMA engine without an alignment requirement requires the table to be | |
850 | * initialized before enabling the engine | |
851 | */ | |
852 | if (!di->aligndesc_4k) | |
853 | _dma_ddtable_init(di, DMA_RX, di->rxdpa); | |
854 | ||
855 | _dma_rxenable(di); | |
856 | ||
857 | if (di->aligndesc_4k) | |
858 | _dma_ddtable_init(di, DMA_RX, di->rxdpa); | |
859 | } | |
860 | ||
861 | static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) | |
862 | { | |
863 | uint i, curr; | |
864 | struct sk_buff *rxp; | |
865 | dma_addr_t pa; | |
866 | ||
867 | i = di->rxin; | |
868 | ||
869 | /* return if no packets posted */ | |
870 | if (i == di->rxout) | |
871 | return NULL; | |
872 | ||
873 | curr = | |
874 | B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - | |
875 | di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); | |
876 | ||
877 | /* ignore curr if forceall */ | |
878 | if (!forceall && (i == curr)) | |
879 | return NULL; | |
880 | ||
881 | /* get the packet pointer that corresponds to the rx descriptor */ | |
882 | rxp = di->rxp[i]; | |
883 | di->rxp[i] = NULL; | |
884 | ||
885 | pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; | |
886 | ||
887 | /* clear this packet from the descriptor ring */ | |
2e81b9b1 | 888 | dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE); |
5b435de0 AS |
889 | |
890 | di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); | |
891 | di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); | |
892 | ||
893 | di->rxin = nextrxd(di, i); | |
894 | ||
895 | return rxp; | |
896 | } | |
897 | ||
898 | static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) | |
899 | { | |
900 | if (di->nrxd == 0) | |
901 | return NULL; | |
902 | ||
903 | return dma64_getnextrxp(di, forceall); | |
904 | } | |
905 | ||
906 | /* | |
907 | * !! rx entry routine | |
3fd172d3 | 908 | * returns the number of packets in the next frame, or 0 if there are no more
5b435de0 AS |
909 | * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is | |
910 | * supported with a packet chain; | |
911 | * otherwise, it is treated as a giant packet and will be tossed. | |
912 | * The DMA scattering starts with normal DMA header, followed by first | |
913 | * buffer data. After it reaches the max size of buffer, the data continues | |
914 | * in next DMA descriptor buffer WITHOUT DMA header | |
915 | */ | |
3fd172d3 | 916 | int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list) |
5b435de0 AS |
917 | { |
918 | struct dma_info *di = (struct dma_info *)pub; | |
3fd172d3 AS |
919 | struct sk_buff_head dma_frames; |
920 | struct sk_buff *p, *next; | |
5b435de0 AS |
921 | uint len; |
922 | uint pkt_len; | |
923 | int resid = 0; | |
3fd172d3 | 924 | int pktcnt = 1; |
5b435de0 | 925 | |
3fd172d3 | 926 | skb_queue_head_init(&dma_frames); |
5b435de0 | 927 | next_frame: |
3fd172d3 AS |
928 | p = _dma_getnextrxp(di, false); |
929 | if (p == NULL) | |
930 | return 0; | |
5b435de0 | 931 | |
3fd172d3 | 932 | len = le16_to_cpu(*(__le16 *) (p->data)); |
8505a7e6 | 933 | DMA_TRACE("%s: dma_rx len %d\n", di->name, len); |
3fd172d3 | 934 | dma_spin_for_len(len, p); |
5b435de0 AS |
935 | |
936 | /* set actual length */ | |
937 | pkt_len = min((di->rxoffset + len), di->rxbufsize); | |
3fd172d3 AS |
938 | __skb_trim(p, pkt_len); |
939 | skb_queue_tail(&dma_frames, p); | |
5b435de0 AS |
940 | resid = len - (di->rxbufsize - di->rxoffset); |
941 | ||
942 | /* check for single or multi-buffer rx */ | |
943 | if (resid > 0) { | |
5b435de0 | 944 | while ((resid > 0) && (p = _dma_getnextrxp(di, false))) { |
5b435de0 AS |
945 | pkt_len = min_t(uint, resid, di->rxbufsize); |
946 | __skb_trim(p, pkt_len); | |
3fd172d3 | 947 | skb_queue_tail(&dma_frames, p); |
5b435de0 | 948 | resid -= di->rxbufsize; |
3fd172d3 | 949 | pktcnt++; |
5b435de0 AS |
950 | } |
951 | ||
952 | #ifdef BCMDBG | |
953 | if (resid > 0) { | |
954 | uint cur; | |
955 | cur = | |
956 | B2I(((R_REG(&di->d64rxregs->status0) & | |
957 | D64_RS0_CD_MASK) - | |
958 | di->rcvptrbase) & D64_RS0_CD_MASK, | |
959 | struct dma64desc); | |
8505a7e6 JP |
960 | DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", |
961 | di->rxin, di->rxout, cur); | |
5b435de0 AS |
962 | } |
963 | #endif /* BCMDBG */ | |
964 | ||
965 | if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { | |
8505a7e6 JP |
966 | DMA_ERROR("%s: bad frame length (%d)\n", |
967 | di->name, len); | |
3fd172d3 AS |
968 | skb_queue_walk_safe(&dma_frames, p, next) { |
969 | skb_unlink(p, &dma_frames); | |
970 | brcmu_pkt_buf_free_skb(p); | |
971 | } | |
5b435de0 | 972 | di->dma.rxgiants++; |
3fd172d3 | 973 | pktcnt = 1; |
5b435de0 AS |
974 | goto next_frame; |
975 | } | |
976 | } | |
977 | ||
3fd172d3 AS |
978 | skb_queue_splice_tail(&dma_frames, skb_list); |
979 | return pktcnt; | |
5b435de0 AS |
980 | } |
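/*
 * dma_rx() takes the frame length from the first two little-endian bytes of
 * the buffer (dma_spin_for_len() waits until that length is usable). If the
 * frame exceeds one rx buffer, the remainder is gathered from the following
 * descriptors into dma_frames; the chain is spliced onto skb_list only when
 * DMA_CTRL_RXMULTI is set, otherwise the whole frame is dropped and counted
 * in rxgiants.
 */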
981 | ||
982 | static bool dma64_rxidle(struct dma_info *di) | |
983 | { | |
8505a7e6 | 984 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
985 | |
986 | if (di->nrxd == 0) | |
987 | return true; | |
988 | ||
989 | return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == | |
990 | (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); | |
991 | } | |
992 | ||
993 | /* | |
994 | * post receive buffers | |
995 | * return true if refill failed completely and the ring is empty; this will | |
996 | * stall the rx dma and the user might want to call rxfill again asap. This | |
997 | * rarely happens on a memory-rich NIC, but often on a memory-constrained dongle | |
998 | */ | |
999 | bool dma_rxfill(struct dma_pub *pub) | |
1000 | { | |
1001 | struct dma_info *di = (struct dma_info *)pub; | |
1002 | struct sk_buff *p; | |
1003 | u16 rxin, rxout; | |
1004 | u32 flags = 0; | |
1005 | uint n; | |
1006 | uint i; | |
1007 | dma_addr_t pa; | |
1008 | uint extra_offset = 0; | |
1009 | bool ring_empty; | |
1010 | ||
1011 | ring_empty = false; | |
1012 | ||
1013 | /* | |
1014 | * Determine how many receive buffers we're lacking | |
1015 | * from the full complement, allocate, initialize, | |
1016 | * and post them, then update the chip rx lastdscr. | |
1017 | */ | |
1018 | ||
1019 | rxin = di->rxin; | |
1020 | rxout = di->rxout; | |
1021 | ||
1022 | n = di->nrxpost - nrxdactive(di, rxin, rxout); | |
1023 | ||
8505a7e6 | 1024 | DMA_TRACE("%s: post %d\n", di->name, n); |
5b435de0 AS |
1025 | |
1026 | if (di->rxbufsize > BCMEXTRAHDROOM) | |
1027 | extra_offset = di->rxextrahdrroom; | |
1028 | ||
1029 | for (i = 0; i < n; i++) { | |
1030 | /* | |
1031 | * the di->rxbufsize doesn't include the extra headroom, | |
1032 | * we need to add it to the size to be allocated | |
1033 | */ | |
1034 | p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); | |
1035 | ||
1036 | if (p == NULL) { | |
8505a7e6 | 1037 | DMA_ERROR("%s: out of rxbufs\n", di->name); |
5b435de0 | 1038 | if (i == 0 && dma64_rxidle(di)) { |
8505a7e6 | 1039 | DMA_ERROR("%s: ring is empty !\n", di->name); |
5b435de0 AS |
1040 | ring_empty = true; |
1041 | } | |
1042 | di->dma.rxnobuf++; | |
1043 | break; | |
1044 | } | |
1045 | /* reserve an extra headroom, if applicable */ | |
1046 | if (extra_offset) | |
1047 | skb_pull(p, extra_offset); | |
1048 | ||
1049 | /* Do a cached write instead of uncached write since DMA_MAP | |
1050 | * will flush the cache. | |
1051 | */ | |
1052 | *(u32 *) (p->data) = 0; | |
1053 | ||
2e81b9b1 AS |
1054 | pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, |
1055 | DMA_FROM_DEVICE); | |
5b435de0 AS |
1056 | |
1057 | /* save the free packet pointer */ | |
1058 | di->rxp[rxout] = p; | |
1059 | ||
1060 | /* reset flags for each descriptor */ | |
1061 | flags = 0; | |
1062 | if (rxout == (di->nrxd - 1)) | |
1063 | flags = D64_CTRL1_EOT; | |
1064 | ||
1065 | dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, | |
1066 | di->rxbufsize); | |
1067 | rxout = nextrxd(di, rxout); | |
1068 | } | |
1069 | ||
1070 | di->rxout = rxout; | |
1071 | ||
1072 | /* update the chip lastdscr pointer */ | |
1073 | W_REG(&di->d64rxregs->ptr, | |
1074 | di->rcvptrbase + I2B(rxout, struct dma64desc)); | |
1075 | ||
1076 | return ring_empty; | |
1077 | } | |
1078 | ||
1079 | void dma_rxreclaim(struct dma_pub *pub) | |
1080 | { | |
1081 | struct dma_info *di = (struct dma_info *)pub; | |
1082 | struct sk_buff *p; | |
1083 | ||
8505a7e6 | 1084 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
1085 | |
1086 | while ((p = _dma_getnextrxp(di, true))) | |
1087 | brcmu_pkt_buf_free_skb(p); | |
1088 | } | |
1089 | ||
1090 | void dma_counterreset(struct dma_pub *pub) | |
1091 | { | |
1092 | /* reset all software counters */ | |
1093 | pub->rxgiants = 0; | |
1094 | pub->rxnobuf = 0; | |
1095 | pub->txnobuf = 0; | |
1096 | } | |
1097 | ||
1098 | /* get the address of the var in order to change later */ | |
1099 | unsigned long dma_getvar(struct dma_pub *pub, const char *name) | |
1100 | { | |
1101 | struct dma_info *di = (struct dma_info *)pub; | |
1102 | ||
1103 | if (!strcmp(name, "&txavail")) | |
1104 | return (unsigned long)&(di->dma.txavail); | |
1105 | return 0; | |
1106 | } | |
1107 | ||
1108 | /* 64-bit DMA functions */ | |
1109 | ||
1110 | void dma_txinit(struct dma_pub *pub) | |
1111 | { | |
1112 | struct dma_info *di = (struct dma_info *)pub; | |
1113 | u32 control = D64_XC_XE; | |
1114 | ||
8505a7e6 | 1115 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
1116 | |
1117 | if (di->ntxd == 0) | |
1118 | return; | |
1119 | ||
1120 | di->txin = di->txout = 0; | |
1121 | di->dma.txavail = di->ntxd - 1; | |
1122 | ||
1123 | /* clear tx descriptor ring */ | |
1124 | memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc))); | |
1125 | ||
1126 | /* A DMA engine without an alignment requirement requires the table to be | |
1127 | * initialized before enabling the engine | |
1128 | */ | |
1129 | if (!di->aligndesc_4k) | |
1130 | _dma_ddtable_init(di, DMA_TX, di->txdpa); | |
1131 | ||
1132 | if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) | |
1133 | control |= D64_XC_PD; | |
1134 | OR_REG(&di->d64txregs->control, control); | |
1135 | ||
1136 | /* A DMA engine with an alignment requirement requires the table to be | |
1137 | * initialized after enabling the engine | |
1138 | */ | |
1139 | if (di->aligndesc_4k) | |
1140 | _dma_ddtable_init(di, DMA_TX, di->txdpa); | |
1141 | } | |
1142 | ||
1143 | void dma_txsuspend(struct dma_pub *pub) | |
1144 | { | |
1145 | struct dma_info *di = (struct dma_info *)pub; | |
1146 | ||
8505a7e6 | 1147 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
1148 | |
1149 | if (di->ntxd == 0) | |
1150 | return; | |
1151 | ||
1152 | OR_REG(&di->d64txregs->control, D64_XC_SE); | |
1153 | } | |
1154 | ||
1155 | void dma_txresume(struct dma_pub *pub) | |
1156 | { | |
1157 | struct dma_info *di = (struct dma_info *)pub; | |
1158 | ||
8505a7e6 | 1159 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
1160 | |
1161 | if (di->ntxd == 0) | |
1162 | return; | |
1163 | ||
1164 | AND_REG(&di->d64txregs->control, ~D64_XC_SE); | |
1165 | } | |
1166 | ||
1167 | bool dma_txsuspended(struct dma_pub *pub) | |
1168 | { | |
1169 | struct dma_info *di = (struct dma_info *)pub; | |
1170 | ||
1171 | return (di->ntxd == 0) || | |
1172 | ((R_REG(&di->d64txregs->control) & D64_XC_SE) == | |
1173 | D64_XC_SE); | |
1174 | } | |
1175 | ||
1176 | void dma_txreclaim(struct dma_pub *pub, enum txd_range range) | |
1177 | { | |
1178 | struct dma_info *di = (struct dma_info *)pub; | |
1179 | struct sk_buff *p; | |
1180 | ||
8505a7e6 JP |
1181 | DMA_TRACE("%s: %s\n", |
1182 | di->name, | |
1183 | range == DMA_RANGE_ALL ? "all" : | |
1184 | range == DMA_RANGE_TRANSMITTED ? "transmitted" : | |
1185 | "transferred"); | |
5b435de0 AS |
1186 | |
1187 | if (di->txin == di->txout) | |
1188 | return; | |
1189 | ||
1190 | while ((p = dma_getnexttxp(pub, range))) { | |
1191 | /* For unframed data, we don't have any packets to free */ | |
1192 | if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED)) | |
1193 | brcmu_pkt_buf_free_skb(p); | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | bool dma_txreset(struct dma_pub *pub) | |
1198 | { | |
1199 | struct dma_info *di = (struct dma_info *)pub; | |
1200 | u32 status; | |
1201 | ||
1202 | if (di->ntxd == 0) | |
1203 | return true; | |
1204 | ||
1205 | /* suspend tx DMA first */ | |
1206 | W_REG(&di->d64txregs->control, D64_XC_SE); | |
1207 | SPINWAIT(((status = | |
1208 | (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) | |
1209 | != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) | |
1210 | && (status != D64_XS0_XS_STOPPED), 10000); | |
1211 | ||
1212 | W_REG(&di->d64txregs->control, 0); | |
1213 | SPINWAIT(((status = | |
1214 | (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) | |
1215 | != D64_XS0_XS_DISABLED), 10000); | |
1216 | ||
1217 | /* wait for the last transaction to complete */ | |
1218 | udelay(300); | |
1219 | ||
1220 | return status == D64_XS0_XS_DISABLED; | |
1221 | } | |
1222 | ||
1223 | bool dma_rxreset(struct dma_pub *pub) | |
1224 | { | |
1225 | struct dma_info *di = (struct dma_info *)pub; | |
1226 | u32 status; | |
1227 | ||
1228 | if (di->nrxd == 0) | |
1229 | return true; | |
1230 | ||
1231 | W_REG(&di->d64rxregs->control, 0); | |
1232 | SPINWAIT(((status = | |
1233 | (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) | |
1234 | != D64_RS0_RS_DISABLED), 10000); | |
1235 | ||
1236 | return status == D64_RS0_RS_DISABLED; | |
1237 | } | |
1238 | ||
1239 | /* | |
1240 | * !! tx entry routine | |
1241 | * WARNING: the caller must check the return value for errors. | |
1242 | * An error (tossed frames) could be fatal and cause many subsequent | |
1243 | * hard-to-debug problems | |
1244 | */ | |
3030794f | 1245 | int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) |
5b435de0 AS |
1246 | { |
1247 | struct dma_info *di = (struct dma_info *)pub; | |
5b435de0 AS |
1248 | unsigned char *data; |
1249 | uint len; | |
1250 | u16 txout; | |
1251 | u32 flags = 0; | |
1252 | dma_addr_t pa; | |
1253 | ||
8505a7e6 | 1254 | DMA_TRACE("%s:\n", di->name); |
5b435de0 AS |
1255 | |
1256 | txout = di->txout; | |
1257 | ||
1258 | /* | |
3030794f | 1259 | * obtain and initialize transmit descriptor entry. |
5b435de0 | 1260 | */ |
3030794f AS |
1261 | data = p->data; |
1262 | len = p->len; | |
5b435de0 | 1263 | |
3030794f AS |
1264 | /* no use to transmit a zero length packet */ |
1265 | if (len == 0) | |
1266 | return 0; | |
5b435de0 | 1267 | |
3030794f AS |
1268 | /* return nonzero if out of tx descriptors */ |
1269 | if (nexttxd(di, txout) == di->txin) | |
1270 | goto outoftxd; | |
5b435de0 | 1271 | |
3030794f | 1272 | /* get physical address of buffer start */ |
2e81b9b1 | 1273 | pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); |
5b435de0 | 1274 | |
3030794f AS |
1275 | /* With a DMA segment list, Descriptor table is filled |
1276 | * using the segment list instead of looping over | |
1277 | * buffers in multi-chain DMA. Therefore, EOF for SGLIST | |
1278 | * is when end of segment list is reached. | |
1279 | */ | |
1280 | flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF; | |
1281 | if (txout == (di->ntxd - 1)) | |
1282 | flags |= D64_CTRL1_EOT; | |
5b435de0 | 1283 | |
3030794f | 1284 | dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); |
5b435de0 | 1285 | |
3030794f | 1286 | txout = nexttxd(di, txout); |
5b435de0 AS |
1287 | |
1288 | /* save the packet */ | |
3030794f | 1289 | di->txp[prevtxd(di, txout)] = p; |
5b435de0 AS |
1290 | |
1291 | /* bump the tx descriptor index */ | |
1292 | di->txout = txout; | |
1293 | ||
1294 | /* kick the chip */ | |
1295 | if (commit) | |
1296 | W_REG(&di->d64txregs->ptr, | |
1297 | di->xmtptrbase + I2B(txout, struct dma64desc)); | |
1298 | ||
1299 | /* tx flow control */ | |
1300 | di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; | |
1301 | ||
1302 | return 0; | |
1303 | ||
1304 | outoftxd: | |
8505a7e6 | 1305 | DMA_ERROR("%s: out of txds !!!\n", di->name); |
3030794f | 1306 | brcmu_pkt_buf_free_skb(p); |
5b435de0 AS |
1307 | di->dma.txavail = 0; |
1308 | di->dma.txnobuf++; | |
1309 | return -1; | |
1310 | } | |
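/*
 * After posting, txavail is recomputed as ntxd - ntxdactive() - 1, and the
 * chip is kicked by writing the byte offset of the next free descriptor
 * (xmtptrbase + I2B(txout)) into the tx ptr register when 'commit' is set,
 * presumably allowing callers to batch several descriptors before a single
 * doorbell write.
 */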
1311 | ||
1312 | /* | |
1313 | * Reclaim next completed txd (txds if using chained buffers) in the range | |
1314 | * specified and return associated packet. | |
1315 | * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been | |
1316 | * transmitted as noted by the hardware "CurrDescr" pointer. | |
1317 | * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been | |
1318 | * transferred by the DMA as noted by the hardware "ActiveDescr" pointer. | |
1319 | * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and | |
1320 | * return associated packet regardless of the value of hardware pointers. | |
1321 | */ | |
1322 | struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) | |
1323 | { | |
1324 | struct dma_info *di = (struct dma_info *)pub; | |
1325 | u16 start, end, i; | |
1326 | u16 active_desc; | |
1327 | struct sk_buff *txp; | |
1328 | ||
8505a7e6 JP |
1329 | DMA_TRACE("%s: %s\n", |
1330 | di->name, | |
1331 | range == DMA_RANGE_ALL ? "all" : | |
1332 | range == DMA_RANGE_TRANSMITTED ? "transmitted" : | |
1333 | "transferred"); | |
5b435de0 AS |
1334 | |
1335 | if (di->ntxd == 0) | |
1336 | return NULL; | |
1337 | ||
1338 | txp = NULL; | |
1339 | ||
1340 | start = di->txin; | |
1341 | if (range == DMA_RANGE_ALL) | |
1342 | end = di->txout; | |
1343 | else { | |
1344 | struct dma64regs __iomem *dregs = di->d64txregs; | |
1345 | ||
1346 | end = (u16) (B2I(((R_REG(&dregs->status0) & | |
1347 | D64_XS0_CD_MASK) - | |
1348 | di->xmtptrbase) & D64_XS0_CD_MASK, | |
1349 | struct dma64desc)); | |
1350 | ||
1351 | if (range == DMA_RANGE_TRANSFERED) { | |
1352 | active_desc = | |
1353 | (u16) (R_REG(&dregs->status1) & | |
1354 | D64_XS1_AD_MASK); | |
1355 | active_desc = | |
1356 | (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; | |
1357 | active_desc = B2I(active_desc, struct dma64desc); | |
1358 | if (end != active_desc) | |
1359 | end = prevtxd(di, active_desc); | |
1360 | } | |
1361 | } | |
1362 | ||
1363 | if ((start == 0) && (end > di->txout)) | |
1364 | goto bogus; | |
1365 | ||
1366 | for (i = start; i != end && !txp; i = nexttxd(di, i)) { | |
1367 | dma_addr_t pa; | |
1368 | uint size; | |
1369 | ||
1370 | pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow; | |
1371 | ||
1372 | size = | |
1373 | (le32_to_cpu(di->txd64[i].ctrl2) & | |
1374 | D64_CTRL2_BC_MASK); | |
1375 | ||
1376 | di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef); | |
1377 | di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef); | |
1378 | ||
1379 | txp = di->txp[i]; | |
1380 | di->txp[i] = NULL; | |
1381 | ||
2e81b9b1 | 1382 | dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE); |
5b435de0 AS |
1383 | } |
1384 | ||
1385 | di->txin = i; | |
1386 | ||
1387 | /* tx flow control */ | |
1388 | di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; | |
1389 | ||
1390 | return txp; | |
1391 | ||
1392 | bogus: | |
8505a7e6 JP |
1393 | DMA_NONE("bogus curr: start %d end %d txout %d\n", |
1394 | start, end, di->txout); | |
5b435de0 AS |
1395 | return NULL; |
1396 | } | |
1397 | ||
1398 | /* | |
1399 | * Mac80211 initiated actions sometimes require packets in the DMA queue to be | |
1400 | * modified. The modified portion of the packet is not under control of the DMA | |
1401 | * engine. This function calls a caller-supplied function for each packet in | |
1402 | * the caller-specified dma chain. | |
1403 | */ | |
1404 | void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc) | |
1405 | (void *pkt, void *arg_a), void *arg_a) | |
1406 | { | |
1407 | struct dma_info *di = (struct dma_info *) dmah; | |
1408 | uint i = di->txin; | |
1409 | uint end = di->txout; | |
1410 | struct sk_buff *skb; | |
1411 | struct ieee80211_tx_info *tx_info; | |
1412 | ||
1413 | while (i != end) { | |
1414 | skb = (struct sk_buff *)di->txp[i]; | |
1415 | if (skb != NULL) { | |
1416 | tx_info = (struct ieee80211_tx_info *)skb->cb; | |
1417 | (callback_fnc)(tx_info, arg_a); | |
1418 | } | |
1419 | i = nexttxd(di, i); | |
1420 | } | |
1421 | } |