staging: ath6kl: Convert enum A_STATUS to int
drivers/staging/ath6kl/htc2/AR6000/ar6k.c
1//------------------------------------------------------------------------------
2// <copyright file="ar6k.c" company="Atheros">
3// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
4//
5//
6// Permission to use, copy, modify, and/or distribute this software for any
7// purpose with or without fee is hereby granted, provided that the above
8// copyright notice and this permission notice appear in all copies.
9//
10// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17//
18//
19//------------------------------------------------------------------------------
20//==============================================================================
21// AR6K device layer that handles register level I/O
22//
23// Author(s): ="Atheros"
24//==============================================================================
25
26#include "a_config.h"
27#include "athdefs.h"
28#include "a_types.h"
29#include "AR6002/hw2.0/hw/mbox_host_reg.h"
30#include "a_osapi.h"
31#include "../htc_debug.h"
32#include "hif.h"
33#include "htc_packet.h"
34#include "ar6k.h"
35
36#define MAILBOX_FOR_BLOCK_SIZE 1
37
38int DevEnableInterrupts(AR6K_DEVICE *pDev);
39int DevDisableInterrupts(AR6K_DEVICE *pDev);
40
41static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev);
42
43void AR6KFreeIOPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket)
44{
45 LOCK_AR6K(pDev);
46 HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket);
47 UNLOCK_AR6K(pDev);
48}
49
50HTC_PACKET *AR6KAllocIOPacket(AR6K_DEVICE *pDev)
51{
52 HTC_PACKET *pPacket;
53
54 LOCK_AR6K(pDev);
55 pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList);
56 UNLOCK_AR6K(pDev);
57
58 return pPacket;
59}
60
61void DevCleanup(AR6K_DEVICE *pDev)
62{
63 DevCleanupGMbox(pDev);
64
65 if (pDev->HifAttached) {
66 HIFDetachHTC(pDev->HIFDevice);
67 pDev->HifAttached = FALSE;
68 }
69
70 DevCleanupVirtualScatterSupport(pDev);
71
72 if (A_IS_MUTEX_VALID(&pDev->Lock)) {
73 A_MUTEX_DELETE(&pDev->Lock);
74 }
75}
76
 77int DevSetup(AR6K_DEVICE *pDev)
78{
79 A_UINT32 blocksizes[AR6K_MAILBOXES];
 80    int status = A_OK;
81 int i;
82 HTC_CALLBACKS htcCallbacks;
83
84 do {
85
86 DL_LIST_INIT(&pDev->ScatterReqHead);
87 /* initialize our free list of IO packets */
88 INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
89 A_MUTEX_INIT(&pDev->Lock);
90
91 A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
92 /* the device layer handles these */
93 htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
94 htcCallbacks.dsrHandler = DevDsrHandler;
95 htcCallbacks.context = pDev;
96
97 status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);
98
99 if (A_FAILED(status)) {
100 break;
101 }
102
103 pDev->HifAttached = TRUE;
104
105 /* get the addresses for all 4 mailboxes */
106 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
107 &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));
108
109 if (status != A_OK) {
110 A_ASSERT(FALSE);
111 break;
112 }
113
114 /* carve up register I/O packets (these are for ASYNC register I/O ) */
115 for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
116 HTC_PACKET *pIOPacket;
117 pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
118 SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
119 pDev,
120 pDev->RegIOBuffers[i].Buffer,
121 AR6K_REG_IO_BUFFER_SIZE,
122 0); /* don't care */
123 AR6KFreeIOPacket(pDev,pIOPacket);
124 }
125
126 /* get the block sizes */
127 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
128 blocksizes, sizeof(blocksizes));
129
130 if (status != A_OK) {
131 A_ASSERT(FALSE);
132 break;
133 }
134
135        /* note: we actually use the block size of a mailbox other than 0; for SDIO the block
136         * size on mailbox 0 is artificially set to 1, so we use the block size that is set
137         * for the other 3 mailboxes */
138 pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
139 /* must be a power of 2 */
140 A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);
141
142 /* assemble mask, used for padding to a block */
143 pDev->BlockMask = pDev->BlockSize - 1;
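        /* Illustrative example (assuming a typical 128-byte block size): BlockMask is then
         * 0x7F, and a 100-byte message pads to (100 + 0x7F) & ~0x7F = 128 bytes, i.e. one
         * full block, before it is pushed to the mailbox. */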
144
145 AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
146 pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));
147
148 pDev->GetPendingEventsFunc = NULL;
149 /* see if the HIF layer implements the get pending events function */
150 HIFConfigureDevice(pDev->HIFDevice,
151 HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
152 &pDev->GetPendingEventsFunc,
153 sizeof(pDev->GetPendingEventsFunc));
154
155 /* assume we can process HIF interrupt events asynchronously */
156 pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;
157
158 /* see if the HIF layer overrides this assumption */
159 HIFConfigureDevice(pDev->HIFDevice,
160 HIF_DEVICE_GET_IRQ_PROC_MODE,
161 &pDev->HifIRQProcessingMode,
162 sizeof(pDev->HifIRQProcessingMode));
163
164 switch (pDev->HifIRQProcessingMode) {
165 case HIF_DEVICE_IRQ_SYNC_ONLY:
166 AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
167 /* see if HIF layer wants HTC to yield */
168 HIFConfigureDevice(pDev->HIFDevice,
169 HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
170 &pDev->HifIRQYieldParams,
171 sizeof(pDev->HifIRQYieldParams));
172
173 if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
174 AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
175 ("HIF requests that DSR yield per %d RECV packets \n",
176 pDev->HifIRQYieldParams.RecvPacketYieldCount));
177 pDev->DSRCanYield = TRUE;
178 }
179 break;
180 case HIF_DEVICE_IRQ_ASYNC_SYNC:
181 AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
182 break;
183 default:
184 A_ASSERT(FALSE);
185 }
186
187 pDev->HifMaskUmaskRecvEvent = NULL;
188
189 /* see if the HIF layer implements the mask/unmask recv events function */
190 HIFConfigureDevice(pDev->HIFDevice,
191 HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
192 &pDev->HifMaskUmaskRecvEvent,
193 sizeof(pDev->HifMaskUmaskRecvEvent));
194
195 AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%lX , 0x%lX\n",
196 (unsigned long)pDev->GetPendingEventsFunc, (unsigned long)pDev->HifMaskUmaskRecvEvent));
197
198 status = DevDisableInterrupts(pDev);
199
200 if (A_FAILED(status)) {
201 break;
202 }
203
204 status = DevSetupGMbox(pDev);
205
206 } while (FALSE);
207
208 if (A_FAILED(status)) {
209 if (pDev->HifAttached) {
210 HIFDetachHTC(pDev->HIFDevice);
211 pDev->HifAttached = FALSE;
212 }
213 }
214
215 return status;
216
217}
218
219int DevEnableInterrupts(AR6K_DEVICE *pDev)
220{
221    int status;
222 AR6K_IRQ_ENABLE_REGISTERS regs;
223
224 LOCK_AR6K(pDev);
225
226 /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
227 pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
228 INT_STATUS_ENABLE_CPU_SET(0x01) |
229 INT_STATUS_ENABLE_COUNTER_SET(0x01);
230
231 if (NULL == pDev->GetPendingEventsFunc) {
232 pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
233 } else {
234 /* The HIF layer provided us with a pending events function which means that
235 * the detection of pending mbox messages is handled in the HIF layer.
236 * This is the case for the SPI2 interface.
237         * In the normal case we enable MBOX interrupts; for HIFs that offer
238         * this mechanism, we keep these interrupts
239         * masked */
240 pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
241 }
242
243
244 /* Set up the CPU Interrupt Status Register */
245 pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);
246
247 /* Set up the Error Interrupt Status Register */
248 pDev->IrqEnableRegisters.error_status_enable =
249 ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
250 ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);
251
252 /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
253 pDev->IrqEnableRegisters.counter_int_status_enable =
254 COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);
255
256 /* copy into our temp area */
257 A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
258
259 UNLOCK_AR6K(pDev);
260
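    /* The shadow fields copied above are written out as one contiguous image: the layout of
     * AR6K_IRQ_ENABLE_REGISTERS is assumed to mirror the target's back-to-back enable registers
     * starting at INT_STATUS_ENABLE_ADDRESS, so a single byte-incrementing write of
     * AR6K_IRQ_ENABLE_REGS_SIZE updates int_status_enable, cpu_int_status_enable,
     * error_status_enable and counter_int_status_enable in one transfer. */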
261 /* always synchronous */
262 status = HIFReadWrite(pDev->HIFDevice,
263 INT_STATUS_ENABLE_ADDRESS,
264 &regs.int_status_enable,
265 AR6K_IRQ_ENABLE_REGS_SIZE,
266 HIF_WR_SYNC_BYTE_INC,
267 NULL);
268
269 if (status != A_OK) {
270 /* Can't write it for some reason */
271 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
272 ("Failed to update interrupt control registers err: %d\n", status));
273
274 }
275
276 return status;
277}
278
279int DevDisableInterrupts(AR6K_DEVICE *pDev)
280{
281 AR6K_IRQ_ENABLE_REGISTERS regs;
282
283 LOCK_AR6K(pDev);
284 /* Disable all interrupts */
285 pDev->IrqEnableRegisters.int_status_enable = 0;
286 pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
287 pDev->IrqEnableRegisters.error_status_enable = 0;
288 pDev->IrqEnableRegisters.counter_int_status_enable = 0;
289 /* copy into our temp area */
290 A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
291
292 UNLOCK_AR6K(pDev);
293
294 /* always synchronous */
295 return HIFReadWrite(pDev->HIFDevice,
296 INT_STATUS_ENABLE_ADDRESS,
297 &regs.int_status_enable,
298 AR6K_IRQ_ENABLE_REGS_SIZE,
299 HIF_WR_SYNC_BYTE_INC,
300 NULL);
301}
302
303/* enable device interrupts */
304int DevUnmaskInterrupts(AR6K_DEVICE *pDev)
305{
306    /* for good measure, make sure interrupts are disabled before unmasking at the HIF
307 * layer.
308 * The rationale here is that between device insertion (where we clear the interrupts the first time)
309 * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
310 * The AR6K interrupt enables reset back to an "enabled" state when this happens.
311 * */
312    int IntStatus = A_OK;
313 DevDisableInterrupts(pDev);
314
315#ifdef THREAD_X
316    // To be verified...
317 IntStatus = DevEnableInterrupts(pDev);
318 /* Unmask the host controller interrupts */
319 HIFUnMaskInterrupt(pDev->HIFDevice);
320#else
321 /* Unmask the host controller interrupts */
322 HIFUnMaskInterrupt(pDev->HIFDevice);
323 IntStatus = DevEnableInterrupts(pDev);
324#endif
325
326 return IntStatus;
327}
328
329/* disable all device interrupts */
330int DevMaskInterrupts(AR6K_DEVICE *pDev)
331{
332 /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
333 * we zero out our shadow registers in DevDisableInterrupts()*/
334 HIFMaskInterrupt(pDev->HIFDevice);
335
336 return DevDisableInterrupts(pDev);
337}
338
339/* callback when our fetch to enable/disable completes */
340static void DevDoEnableDisableRecvAsyncHandler(void *Context, HTC_PACKET *pPacket)
341{
342 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
343
344 AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
345
346 if (A_FAILED(pPacket->Status)) {
347 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
348 (" Failed to disable receiver, status:%d \n", pPacket->Status));
349 }
350 /* free this IO packet */
351 AR6KFreeIOPacket(pDev,pPacket);
352 AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n"));
353}
354
355/* enable/disable packet reception (used in case the host runs out of buffers)
356 * this is the "override" method used when the HIF provides its own mechanism to
357 * mask/unmask recv events */
358static int DevDoEnableDisableRecvOverride(AR6K_DEVICE *pDev, A_BOOL EnableRecv, A_BOOL AsyncMode)
359{
360    int status = A_OK;
361 HTC_PACKET *pIOPacket = NULL;
362
363 AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mode:%d\n",
364 EnableRecv,AsyncMode));
365
366 do {
367
368 if (AsyncMode) {
369
370 pIOPacket = AR6KAllocIOPacket(pDev);
371
372 if (NULL == pIOPacket) {
373 status = A_NO_MEMORY;
374 A_ASSERT(FALSE);
375 break;
376 }
377
378 /* stick in our completion routine when the I/O operation completes */
379 pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
380 pIOPacket->pContext = pDev;
381
382 /* call the HIF layer override and do this asynchronously */
383 status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
384 EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
385 pIOPacket);
386 break;
387 }
388
389 /* if we get here we are doing it synchronously */
390 status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice,
391 EnableRecv ? HIF_UNMASK_RECV : HIF_MASK_RECV,
392 NULL);
393
394 } while (FALSE);
395
396 if (A_FAILED(status) && (pIOPacket != NULL)) {
397 AR6KFreeIOPacket(pDev,pIOPacket);
398 }
399
400 return status;
401}
402
403/* enable/disable packet reception (used in case the host runs out of buffers)
404 * this is the "normal" method using the interrupt enable registers through
405 * the host I/F */
406static int DevDoEnableDisableRecvNormal(AR6K_DEVICE *pDev, A_BOOL EnableRecv, A_BOOL AsyncMode)
407{
408    int status = A_OK;
409 HTC_PACKET *pIOPacket = NULL;
410 AR6K_IRQ_ENABLE_REGISTERS regs;
411
412 /* take the lock to protect interrupt enable shadows */
413 LOCK_AR6K(pDev);
414
415 if (EnableRecv) {
416 pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
417 } else {
418 pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
419 }
420
421 /* copy into our temp area */
422 A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
423 UNLOCK_AR6K(pDev);
424
425 do {
426
427 if (AsyncMode) {
428
429 pIOPacket = AR6KAllocIOPacket(pDev);
430
431 if (NULL == pIOPacket) {
432 status = A_NO_MEMORY;
433 A_ASSERT(FALSE);
434 break;
435 }
436
437 /* copy values to write to our async I/O buffer */
438 A_MEMCPY(pIOPacket->pBuffer,&regs,AR6K_IRQ_ENABLE_REGS_SIZE);
439
440 /* stick in our completion routine when the I/O operation completes */
441 pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
442 pIOPacket->pContext = pDev;
443
444 /* write it out asynchronously */
445 HIFReadWrite(pDev->HIFDevice,
446 INT_STATUS_ENABLE_ADDRESS,
447 pIOPacket->pBuffer,
448 AR6K_IRQ_ENABLE_REGS_SIZE,
449 HIF_WR_ASYNC_BYTE_INC,
450 pIOPacket);
451 break;
452 }
453
454 /* if we get here we are doing it synchronously */
455
456 status = HIFReadWrite(pDev->HIFDevice,
457 INT_STATUS_ENABLE_ADDRESS,
458 &regs.int_status_enable,
459 AR6K_IRQ_ENABLE_REGS_SIZE,
460 HIF_WR_SYNC_BYTE_INC,
461 NULL);
462
463 } while (FALSE);
464
465 if (A_FAILED(status) && (pIOPacket != NULL)) {
466 AR6KFreeIOPacket(pDev,pIOPacket);
467 }
468
469 return status;
470}
471
472
473int DevStopRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode)
474{
475 if (NULL == pDev->HifMaskUmaskRecvEvent) {
476 return DevDoEnableDisableRecvNormal(pDev,FALSE,AsyncMode);
477 } else {
478 return DevDoEnableDisableRecvOverride(pDev,FALSE,AsyncMode);
479 }
480}
481
482int DevEnableRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode)
483{
484 if (NULL == pDev->HifMaskUmaskRecvEvent) {
485 return DevDoEnableDisableRecvNormal(pDev,TRUE,AsyncMode);
486 } else {
487 return DevDoEnableDisableRecvOverride(pDev,TRUE,AsyncMode);
488 }
489}
490
491int DevWaitForPendingRecv(AR6K_DEVICE *pDev,A_UINT32 TimeoutInMs,A_BOOL *pbIsRecvPending)
492{
493    int status = A_OK;
494 A_UCHAR host_int_status = 0x0;
495 A_UINT32 counter = 0x0;
496
497 if(TimeoutInMs < 100)
498 {
499 TimeoutInMs = 100;
500 }
501
502 counter = TimeoutInMs / 100;
503
504 do
505 {
506 //Read the Host Interrupt Status Register
507 status = HIFReadWrite(pDev->HIFDevice,
508 HOST_INT_STATUS_ADDRESS,
509 &host_int_status,
510 sizeof(A_UCHAR),
511 HIF_RD_SYNC_BYTE_INC,
512 NULL);
513 if(A_FAILED(status))
514 {
515 AR_DEBUG_PRINTF(ATH_LOG_ERR,("DevWaitForPendingRecv:Read HOST_INT_STATUS_ADDRESS Failed 0x%X\n",status));
516 break;
517 }
518
519 host_int_status = A_SUCCESS(status) ? (host_int_status & (1 << 0)):0;
520 if(!host_int_status)
521 {
522 status = A_OK;
523 *pbIsRecvPending = FALSE;
524 break;
525 }
526 else
527 {
528 *pbIsRecvPending = TRUE;
529 }
530
531 A_MDELAY(100);
532
533 counter--;
534
535 }while(counter);
536 return status;
537}
538
539void DevDumpRegisters(AR6K_DEVICE *pDev,
540 AR6K_IRQ_PROC_REGISTERS *pIrqProcRegs,
541 AR6K_IRQ_ENABLE_REGISTERS *pIrqEnableRegs)
542{
543
544 AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));
545
546 if (pIrqProcRegs != NULL) {
547 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
548 ("Host Int Status: 0x%x\n",pIrqProcRegs->host_int_status));
549 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
550 ("CPU Int Status: 0x%x\n",pIrqProcRegs->cpu_int_status));
551 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
552 ("Error Int Status: 0x%x\n",pIrqProcRegs->error_int_status));
553 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
554 ("Counter Int Status: 0x%x\n",pIrqProcRegs->counter_int_status));
555 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
556 ("Mbox Frame: 0x%x\n",pIrqProcRegs->mbox_frame));
557 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
558 ("Rx Lookahead Valid: 0x%x\n",pIrqProcRegs->rx_lookahead_valid));
559 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
560 ("Rx Lookahead 0: 0x%x\n",pIrqProcRegs->rx_lookahead[0]));
561 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
562 ("Rx Lookahead 1: 0x%x\n",pIrqProcRegs->rx_lookahead[1]));
563
564 if (pDev->MailBoxInfo.GMboxAddress != 0) {
565 /* if the target supports GMBOX hardware, dump some additional state */
566 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
567 ("GMBOX Host Int Status 2: 0x%x\n",pIrqProcRegs->host_int_status2));
568 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
569 ("GMBOX RX Avail: 0x%x\n",pIrqProcRegs->gmbox_rx_avail));
570 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
571 ("GMBOX lookahead alias 0: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
572 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
573 ("GMBOX lookahead alias 1: 0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
574 }
575
576 }
577
578 if (pIrqEnableRegs != NULL) {
579 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
580 ("Int Status Enable: 0x%x\n",pIrqEnableRegs->int_status_enable));
581 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
582 ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
583 }
584 AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
585}
586
587
588#define DEV_GET_VIRT_DMA_INFO(p) ((DEV_SCATTER_DMA_VIRTUAL_INFO *)((p)->HIFPrivate[0]))
589
590static HIF_SCATTER_REQ *DevAllocScatterReq(HIF_DEVICE *Context)
591{
592 DL_LIST *pItem;
593 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
594 LOCK_AR6K(pDev);
595 pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead);
596 UNLOCK_AR6K(pDev);
597 if (pItem != NULL) {
598 return A_CONTAINING_STRUCT(pItem, HIF_SCATTER_REQ, ListLink);
599 }
600 return NULL;
601}
602
603static void DevFreeScatterReq(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq)
604{
605 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
606 LOCK_AR6K(pDev);
607 DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink);
608 UNLOCK_AR6K(pDev);
609}
610
611int DevCopyScatterListToFromDMABuffer(HIF_SCATTER_REQ *pReq, A_BOOL FromDMA)
612{
613 A_UINT8 *pDMABuffer = NULL;
614 int i, remaining;
615 A_UINT32 length;
616
617 pDMABuffer = pReq->pScatterBounceBuffer;
618
619 if (pDMABuffer == NULL) {
620 A_ASSERT(FALSE);
621 return A_EINVAL;
622 }
623
624 remaining = (int)pReq->TotalLength;
625
626 for (i = 0; i < pReq->ValidScatterEntries; i++) {
627
628 length = min((int)pReq->ScatterList[i].Length, remaining);
629
630 if (length != (int)pReq->ScatterList[i].Length) {
631 A_ASSERT(FALSE);
632 /* there is a problem with the scatter list */
633 return A_EINVAL;
634 }
635
636 if (FromDMA) {
637 /* from DMA buffer */
638 A_MEMCPY(pReq->ScatterList[i].pBuffer, pDMABuffer , length);
639 } else {
640 /* to DMA buffer */
641 A_MEMCPY(pDMABuffer, pReq->ScatterList[i].pBuffer, length);
642 }
643
644 pDMABuffer += length;
645 remaining -= length;
646 }
647
648 return A_OK;
649}
650
651static void DevReadWriteScatterAsyncHandler(void *Context, HTC_PACKET *pPacket)
652{
653 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
654 HIF_SCATTER_REQ *pReq = (HIF_SCATTER_REQ *)pPacket->pPktContext;
655
656 AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
657
658 pReq->CompletionStatus = pPacket->Status;
659
660 AR6KFreeIOPacket(pDev,pPacket);
661
662 pReq->CompletionRoutine(pReq);
663
664 AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n"));
665}
666
667static int DevReadWriteScatter(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq)
668{
669 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context;
670    int status = A_OK;
671 HTC_PACKET *pIOPacket = NULL;
672 A_UINT32 request = pReq->Request;
673
674 do {
675
676 if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) {
677 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
678 ("Invalid length: %d \n", pReq->TotalLength));
679 break;
680 }
681
682 if (pReq->TotalLength == 0) {
683 A_ASSERT(FALSE);
684 break;
685 }
686
687 if (request & HIF_ASYNCHRONOUS) {
688 /* use an I/O packet to carry this request */
689 pIOPacket = AR6KAllocIOPacket(pDev);
690 if (NULL == pIOPacket) {
691 status = A_NO_MEMORY;
692 break;
693 }
694
695 /* save the request */
696 pIOPacket->pPktContext = pReq;
697 /* stick in our completion routine when the I/O operation completes */
698 pIOPacket->Completion = DevReadWriteScatterAsyncHandler;
699 pIOPacket->pContext = pDev;
700 }
701
702 if (request & HIF_WRITE) {
703            /* in virtual DMA, we issue the requests through the legacy HIFReadWrite API;
704             * this API will adjust the address automatically so that the last byte falls on the
705             * mailbox EOM. */
706
707 /* if the address is an extended address, we can adjust the address here since the extended
708 * address will bypass the normal checks in legacy HIF layers */
709 if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress) {
710 pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize - pReq->TotalLength;
711 }
712 }
713
714 /* use legacy readwrite */
715 status = HIFReadWrite(pDev->HIFDevice,
716 pReq->Address,
717 DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer,
718 pReq->TotalLength,
719 request,
720 (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL);
721
722 } while (FALSE);
723
724 if ((status != A_PENDING) && A_FAILED(status) && (request & HIF_ASYNCHRONOUS)) {
725 if (pIOPacket != NULL) {
726 AR6KFreeIOPacket(pDev,pIOPacket);
727 }
728 pReq->CompletionStatus = status;
729 pReq->CompletionRoutine(pReq);
730 status = A_OK;
731 }
732
733 return status;
734}
735
736
737static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev)
738{
739 HIF_SCATTER_REQ *pReq;
740
741 while (1) {
742 pReq = DevAllocScatterReq((HIF_DEVICE *)pDev);
743 if (NULL == pReq) {
744 break;
745 }
746 A_FREE(pReq);
747 }
748
749}
750
751 /* function to set up virtual scatter support if HIF layer has not implemented the interface */
752static int DevSetupVirtualScatterSupport(AR6K_DEVICE *pDev)
753{
754    int status = A_OK;
755 int bufferSize, sgreqSize;
756 int i;
757 DEV_SCATTER_DMA_VIRTUAL_INFO *pVirtualInfo;
758 HIF_SCATTER_REQ *pReq;
759
760 bufferSize = sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO) +
761 2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
762
763 sgreqSize = sizeof(HIF_SCATTER_REQ) +
764 (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(HIF_SCATTER_ITEM));
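    /* Each allocation below is one contiguous block, roughly laid out as (sketch, based on the
     * sizes computed above):
     *
     *   [ HIF_SCATTER_REQ incl. (AR6K_SCATTER_ENTRIES_PER_REQ - 1) extra scatter items ]
     *   [ DEV_SCATTER_DMA_VIRTUAL_INFO header                                          ]
     *   [ up to two cache lines of alignment slack + the bounce area                   ]
     *
     * pVirtDmaBuffer is then pointed at the cache-line-aligned start of the bounce area. */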
765
766 for (i = 0; i < AR6K_SCATTER_REQS; i++) {
767 /* allocate the scatter request, buffer info and the actual virtual buffer itself */
768 pReq = (HIF_SCATTER_REQ *)A_MALLOC(sgreqSize + bufferSize);
769
770 if (NULL == pReq) {
771 status = A_NO_MEMORY;
772 break;
773 }
774
775 A_MEMZERO(pReq, sgreqSize);
776
777 /* the virtual DMA starts after the scatter request struct */
778 pVirtualInfo = (DEV_SCATTER_DMA_VIRTUAL_INFO *)((A_UINT8 *)pReq + sgreqSize);
779 A_MEMZERO(pVirtualInfo, sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO));
780
781 pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
782 /* align buffer to cache line in case host controller can actually DMA this */
783 pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
784 /* store the structure in the private area */
785 pReq->HIFPrivate[0] = pVirtualInfo;
786 /* we emulate a DMA bounce interface */
787 pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
788 pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
789 /* free request to the list */
790 DevFreeScatterReq((HIF_DEVICE *)pDev,pReq);
791 }
792
793 if (A_FAILED(status)) {
794 DevCleanupVirtualScatterSupport(pDev);
795 } else {
796 pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
797 pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
798 pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
799 if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
800 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K: SPI bus requires RX scatter limits\n"));
801 pDev->HifScatterInfo.MaxScatterEntries = AR6K_MIN_SCATTER_ENTRIES_PER_REQ;
802 pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MIN_TRANSFER_SIZE_PER_SCATTER;
803 } else {
804 pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
805 pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
806 }
807 pDev->ScatterIsVirtual = TRUE;
808 }
809
810 return status;
811}
812
813
814int DevSetupMsgBundling(AR6K_DEVICE *pDev, int MaxMsgsPerTransfer)
815{
816    int status;
817
818 if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
819 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
820 return A_ENOTSUP;
821 }
822
823 status = HIFConfigureDevice(pDev->HIFDevice,
824 HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
825 &pDev->HifScatterInfo,
826 sizeof(pDev->HifScatterInfo));
827
828 if (A_FAILED(status)) {
829 AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
830 ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));
831
832 /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
833 status = DevSetupVirtualScatterSupport(pDev);
834
835 if (A_SUCCESS(status)) {
836 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
837 ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
838 DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
839 }
840
841 } else {
842 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
843 ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
844 DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
845 }
846
847 if (A_SUCCESS(status)) {
848 /* for the recv path, the maximum number of bytes per recv bundle is just limited
849 * by the maximum transfer size at the HIF layer */
850 pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
851
852 if (pDev->MailBoxInfo.MboxBusIFType == MBOX_BUS_IF_SPI) {
853 AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("AR6K : SPI bus requires TX bundling disabled\n"));
854 pDev->MaxSendBundleSize = 0;
855 } else {
856 /* for the send path, the max transfer size is limited by the existence and size of
857 * the extended mailbox address range */
858 if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
859 pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
860 } else {
861 /* legacy */
862 pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
863 }
864
865 if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
866 /* limit send bundle size to what the HIF can support for scatter requests */
867 pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
868 }
869 }
870
871 AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
872 ("AR6K: max recv: %d max send: %d \n",
873 DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));
874
875 }
876 return status;
877}
878
879int DevSubmitScatterRequest(AR6K_DEVICE *pDev, HIF_SCATTER_REQ *pScatterReq, A_BOOL Read, A_BOOL Async)
880{
881    int status;
882
883 if (Read) {
884 /* read operation */
885 pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
886 pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
887 A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
888 } else {
889 A_UINT32 mailboxWidth;
890
891 /* write operation */
892 pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
893 A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
894 if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
895 /* for large writes use the extended address */
896 pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
897 mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
898 } else {
899 pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
900 mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
901 }
902
903 if (!pDev->ScatterIsVirtual) {
904            /* we are passing this scatter list down to the HIF layer's scatter request handler; fix up
905             * the address so that the last byte falls on the EOM. We do this for those HIFs that support
906             * the scatter API */
907 pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
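            /* Example with hypothetical numbers: for a 2048-byte mailbox width and a
             * 1536-byte request, the address is advanced by 2048 - 1536 = 512 bytes so
             * the final byte of the transfer lands exactly on the mailbox end-of-message
             * (EOM) address. */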
908 }
909
910 }
911
912 AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
913 ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
914 pScatterReq->ValidScatterEntries,
915 pScatterReq->TotalLength,
916 pScatterReq->Address,
917 Async ? "ASYNC" : "SYNC",
918 (Read) ? "RD" : "WR"));
919
920 status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);
921
922 if (A_FAILED(status)) {
923 if (Async) {
924 pScatterReq->CompletionStatus = status;
925 pScatterReq->CompletionRoutine(pScatterReq);
926 return A_OK;
927 }
928 return status;
929 }
930
931 status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
932 pScatterReq);
933 if (!Async) {
934 /* in sync mode, we can touch the scatter request */
935 pScatterReq->CompletionStatus = status;
936 DEV_FINISH_SCATTER_OPERATION(pScatterReq);
937 } else {
938 if (status == A_PENDING) {
939 status = A_OK;
940 }
941 }
942
943 return status;
944}
945
946
947#ifdef MBOXHW_UNIT_TEST
948
949
950/* This is a mailbox hardware unit test that must be called in a schedulable context
951 * This test is very simple: it will send a list of buffers with a counting pattern
952 * and the target will invert the data and send the message back
953 *
954 * the unit test has the following constraints:
955 *
956 * The target has at least 8 buffers of 256 bytes each. The host will send
957 * the following pattern of buffers in rapid succession :
958 *
959 * 1 buffer - 128 bytes
960 * 1 buffer - 256 bytes
961 * 1 buffer - 512 bytes
962 * 1 buffer - 1024 bytes
963 *
964 * The host will send the buffers to one mailbox and wait for buffers to be reflected
965 * back from the same mailbox. The target sends the buffers back in FIFO order.
966 * Once the final buffer has been received for a mailbox, the next mailbox is tested.
967 *
968 *
969 * Note: To simplify the test, we assume that the chosen buffer sizes
970 * will fall on a nice block pad
971 *
972 * It is expected that higher-order tests will be written to stress the mailboxes using
973 * a message-based protocol (with some performance timing) that can create more
974 * randomness in the packets sent over mailboxes.
975 *
976 * */
977
978#define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1))
979
980#define BUFFER_BLOCK_PAD 128
981
982#if 0
983#define BUFFER1 128
984#define BUFFER2 256
985#define BUFFER3 512
986#define BUFFER4 1024
987#endif
988
989#if 1
990#define BUFFER1 80
991#define BUFFER2 200
992#define BUFFER3 444
993#define BUFFER4 800
994#endif
995
996#define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \
997 A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \
998 A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \
999 A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) )
1000
1001#define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4)
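/* With the buffer sizes above (80/200/444/800) and the 128-byte BUFFER_BLOCK_PAD, the padded
 * sizes are 128, 256, 512 and 896 bytes, so TOTAL_BYTES evaluates to 1792 while TEST_BYTES
 * (the unpadded payload) is 1524. */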
1002
1003#define TEST_CREDITS_RECV_TIMEOUT 100
1004
1005static A_UINT8 g_Buffer[TOTAL_BYTES];
1006static A_UINT32 g_MailboxAddrs[AR6K_MAILBOXES];
1007static A_UINT32 g_BlockSizes[AR6K_MAILBOXES];
1008
1009#define BUFFER_PROC_LIST_DEPTH 4
1010
1011typedef struct _BUFFER_PROC_LIST{
1012 A_UINT8 *pBuffer;
1013 A_UINT32 length;
1014}BUFFER_PROC_LIST;
1015
1016
1017#define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \
1018{ \
1019 (pList)->pBuffer = (pCurrpos); \
1020 (pList)->length = (len); \
1021 (pCurrpos) += (len); \
1022 (pList)++; \
1023}
1024
1025/* a simple and crude way to send different "message" sizes */
1026static void AssembleBufferList(BUFFER_PROC_LIST *pList)
1027{
1028 A_UINT8 *pBuffer = g_Buffer;
1029
1030#if BUFFER_PROC_LIST_DEPTH < 4
1031#error "Buffer processing list depth is not deep enough!!"
1032#endif
1033
1034 PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer);
1035 PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer);
1036 PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer);
1037 PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer);
1038
1039}
1040
1041#define FILL_ZERO TRUE
1042#define FILL_COUNTING FALSE
1043static void InitBuffers(A_BOOL Zero)
1044{
1045 A_UINT16 *pBuffer16 = (A_UINT16 *)g_Buffer;
1046 int i;
1047
1048 /* fill buffer with 16 bit counting pattern or zeros */
1049 for (i = 0; i < (TOTAL_BYTES / 2) ; i++) {
1050 if (!Zero) {
1051 pBuffer16[i] = (A_UINT16)i;
1052 } else {
1053 pBuffer16[i] = 0;
1054 }
1055 }
1056}
1057
1058
1059static A_BOOL CheckOneBuffer(A_UINT16 *pBuffer16, int Length)
1060{
1061 int i;
1062 A_UINT16 startCount;
1063 A_BOOL success = TRUE;
1064
1065 /* get the starting count */
1066 startCount = pBuffer16[0];
1067 /* invert it, this is the expected value */
1068 startCount = ~startCount;
1069 /* scan the buffer and verify */
1070 for (i = 0; i < (Length / 2) ; i++,startCount++) {
1071 /* target will invert all the data */
1072 if ((A_UINT16)pBuffer16[i] != (A_UINT16)~startCount) {
1073 success = FALSE;
1074 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x%X (offset:%d, total:%d) \n",
1075 pBuffer16[i], ((A_UINT16)~startCount), i, Length));
1076 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n",
1077 pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer16[i+3]));
1078 break;
1079 }
1080 }
1081
1082 return success;
1083}
1084
1085static A_BOOL CheckBuffers(void)
1086{
1087 int i;
1088 A_BOOL success = TRUE;
1089 BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH];
1090
1091 /* assemble the list */
1092 AssembleBufferList(checkList);
1093
1094 /* scan the buffers and verify */
1095 for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) {
1096 success = CheckOneBuffer((A_UINT16 *)checkList[i].pBuffer, checkList[i].length);
1097 if (!success) {
1098 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed verify \n",
1099 (A_UINT32)checkList[i].pBuffer, checkList[i].length));
1100 break;
1101 }
1102 }
1103
1104 return success;
1105}
1106
1107 /* find the end marker for the last buffer we will be sending */
1108static A_UINT16 GetEndMarker(void)
1109{
1110 A_UINT8 *pBuffer;
1111 BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH];
1112
1113 /* fill up buffers with the normal counting pattern */
1114 InitBuffers(FILL_COUNTING);
1115
1116 /* assemble the list we will be sending down */
1117 AssembleBufferList(checkList);
1118 /* point to the last 2 bytes of the last buffer */
1119 pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_PROC_LIST_DEPTH - 1].length) - 2]);
1120
1121 /* the last count in the last buffer is the marker */
1122 return (A_UINT16)pBuffer[0] | ((A_UINT16)pBuffer[1] << 8);
1123}
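/* For reference: because AssembleBufferList() lays the four buffers out back to back in
 * g_Buffer, the last 16-bit word of BUFFER4 sits at byte offset 1522, so on a little-endian
 * host the marker returned above is word index 761 (0x2F9). */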
1124
1125#define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR
1126
1127/* send the ordered buffers to the target */
1128static int SendBuffers(AR6K_DEVICE *pDev, int mbox)
1129{
1130    int status = A_OK;
1131 A_UINT32 request = HIF_WR_SYNC_BLOCK_INC;
1132 BUFFER_PROC_LIST sendList[BUFFER_PROC_LIST_DEPTH];
1133 int i;
1134 int totalBytes = 0;
1135 int paddedLength;
1136 int totalwPadding = 0;
1137
1138 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mbox));
1139
1140 /* fill buffer with counting pattern */
1141 InitBuffers(FILL_COUNTING);
1142
1143 /* assemble the order in which we send */
1144 AssembleBufferList(sendList);
1145
1146 for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) {
1147
1148 /* we are doing block transfers, so we need to pad everything to a block size */
1149 paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) &
1150 (~(g_BlockSizes[mbox] - 1));
1151
1152 /* send each buffer synchronously */
1153 status = HIFReadWrite(pDev->HIFDevice,
1154 g_MailboxAddrs[mbox],
1155 sendList[i].pBuffer,
1156 paddedLength,
1157 request,
1158 NULL);
1159 if (status != A_OK) {
1160 break;
1161 }
1162 totalBytes += sendList[i].length;
1163 totalwPadding += paddedLength;
1164 }
1165
1166 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mailbox : %d \n",totalBytes,totalwPadding,mbox));
1167
1168 return status;
1169}
1170
1171/* poll the mailbox credit counter until we get a credit or timeout */
1172static int GetCredits(AR6K_DEVICE *pDev, int mbox, int *pCredits)
1173{
1174    int status = A_OK;
1175 int timeout = TEST_CREDITS_RECV_TIMEOUT;
1176 A_UINT8 credits = 0;
1177 A_UINT32 address;
1178
1179 while (TRUE) {
1180
1181 /* Read the counter register to get credits, this auto-decrements */
1182 address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4;
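        /* e.g. for mailbox 0 this reads the register at COUNT_DEC_ADDRESS + 4*4, i.e. the
         * decrementing counter past the first bank of per-mailbox counters (this assumes
         * AR6K_MAILBOXES is 4, as suggested by the 'all 4 mailboxes' comments above). */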
1183 status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits),
1184 HIF_RD_SYNC_BYTE_FIX, NULL);
1185 if (status != A_OK) {
1186 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
1187 ("Unable to decrement the command credit count register (mbox=%d)\n",mbox));
1188 status = A_ERROR;
1189 break;
1190 }
1191
1192 if (credits) {
1193 break;
1194 }
1195
1196 timeout--;
1197
1198 if (timeout <= 0) {
1199 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
1200 (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",mbox,address));
1201 status = A_ERROR;
1202 break;
1203 }
1204
1205 /* delay a little, target may not be ready */
1206 A_MDELAY(1000);
1207
1208 }
1209
1210 if (status == A_OK) {
1211 *pCredits = credits;
1212 }
1213
1214 return status;
1215}
1216
1217
1218/* wait for the buffers to come back */
1219static int RecvBuffers(AR6K_DEVICE *pDev, int mbox)
1220{
1221    int status = A_OK;
1222 A_UINT32 request = HIF_RD_SYNC_BLOCK_INC;
1223 BUFFER_PROC_LIST recvList[BUFFER_PROC_LIST_DEPTH];
1224 int curBuffer;
1225 int credits;
1226 int i;
1227 int totalBytes = 0;
1228 int paddedLength;
1229 int totalwPadding = 0;
1230
1231 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n",mbox));
1232
1233 /* zero the buffers */
1234 InitBuffers(FILL_ZERO);
1235
1236 /* assemble the order in which we should receive */
1237 AssembleBufferList(recvList);
1238
1239 curBuffer = 0;
1240
1241 while (curBuffer < BUFFER_PROC_LIST_DEPTH) {
1242
1243 /* get number of buffers that have been completed, this blocks
1244 * until we get at least 1 credit or it times out */
1245 status = GetCredits(pDev, mbox, &credits);
1246
1247 if (status != A_OK) {
1248 break;
1249 }
1250
1251 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n",credits, mbox));
1252
1253 /* get all the buffers that are sitting on the queue */
1254 for (i = 0; i < credits; i++) {
1255 A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH);
1256 /* recv the current buffer synchronously, the buffers should come back in
1257 * order... with padding applied by the target */
1258 paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1)) &
1259 (~(g_BlockSizes[mbox] - 1));
1260
1261 status = HIFReadWrite(pDev->HIFDevice,
1262 g_MailboxAddrs[mbox],
1263 recvList[curBuffer].pBuffer,
1264 paddedLength,
1265 request,
1266 NULL);
1267 if (status != A_OK) {
1268 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mailbox:%d : address:0x%X \n",
1269 recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]));
1270 break;
1271 }
1272
1273 totalwPadding += paddedLength;
1274 totalBytes += recvList[curBuffer].length;
1275 curBuffer++;
1276 }
1277
1278 if (status != A_OK) {
1279 break;
1280 }
1281 /* go back and get some more */
1282 credits = 0;
1283 }
1284
1285 if (totalBytes != TEST_BYTES) {
1286 A_ASSERT(FALSE);
1287 } else {
1288 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total recv :%d (w/Padding : %d) \n",
1289 mbox, totalBytes, totalwPadding));
1290 }
1291
1292 return status;
1293
1294
1295}
1296
1297static int DoOneMboxHWTest(AR6K_DEVICE *pDev, int mbox)
1298{
1299    int status;
1300
1301 do {
1302 /* send out buffers */
1303 status = SendBuffers(pDev,mbox);
1304
1305 if (status != A_OK) {
1306 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d\n",status,mbox));
1307 break;
1308 }
1309
1310 /* go get them, this will block */
1311 status = RecvBuffers(pDev, mbox);
1312
1313 if (status != A_OK) {
1314 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n",status,mbox));
1315 break;
1316 }
1317
1318 /* check the returned data patterns */
1319 if (!CheckBuffers()) {
1320 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",mbox));
1321 status = A_ERROR;
1322 break;
1323 }
1324
1325 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \n",mbox));
1326
1327 } while (FALSE);
1328
1329 return status;
1330}
1331
1332/* here is where the test starts */
1333int DoMboxHWTest(AR6K_DEVICE *pDev)
1334{
1335 int i;
1336    int status;
1337 int credits = 0;
1338 A_UINT8 params[4];
1339 int numBufs;
1340 int bufferSize;
1341 A_UINT16 temp;
1342
1343
1344 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n"));
1345
1346 do {
1347 /* get the addresses for all 4 mailboxes */
1348 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
1349 g_MailboxAddrs, sizeof(g_MailboxAddrs));
1350
1351 if (status != A_OK) {
1352 A_ASSERT(FALSE);
1353 break;
1354 }
1355
1356 /* get the block sizes */
1357 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
1358 g_BlockSizes, sizeof(g_BlockSizes));
1359
1360 if (status != A_OK) {
1361 A_ASSERT(FALSE);
1362 break;
1363 }
1364
1365 /* note, the HIF layer usually reports mbox 0 to have a block size of
1366 * 1, but our test wants to run in block-mode for all mailboxes, so we treat all mailboxes
1367 * the same. */
1368 g_BlockSizes[0] = g_BlockSizes[1];
1369 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockSizes[0]));
1370
1371 if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) {
1372 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for buffer pad %d\n",
1373 g_BlockSizes[1], BUFFER_BLOCK_PAD));
1374 break;
1375 }
1376
1377 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n"));
1378
1379 /* the target lets us know it is ready by giving us 1 credit on
1380 * mailbox 0 */
1381 status = GetCredits(pDev, 0, &credits);
1382
1383 if (status != A_OK) {
1384 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"));
1385 break;
1386 }
1387
1388 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n"));
1389
1390 /* read the first 4 scratch registers */
1391 status = HIFReadWrite(pDev->HIFDevice,
1392 SCRATCH_ADDRESS,
1393 params,
1394 4,
1395 HIF_RD_SYNC_BYTE_INC,
1396 NULL);
1397
1398 if (status != A_OK) {
1399            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get parameters \n"));
1400 break;
1401 }
1402
1403 numBufs = params[0];
1404 bufferSize = (int)(((A_UINT16)params[2] << 8) | (A_UINT16)params[1]);
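        /* Decode sketch: params[0] is the per-mailbox buffer count and params[1..2] form a
         * little-endian 16-bit buffer size, so a target reporting 8 buffers of 256 bytes
         * would return params = { 0x08, 0x00, 0x01, ... }. */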
1405
1406 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE,
1407 ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (total space: %d, minimum required space (w/padding): %d) \n",
1408 numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES));
1409
1410 if ((numBufs * bufferSize) < TOTAL_BYTES) {
1411 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test! need:%d, got:%d \n",
1412 TOTAL_BYTES, (numBufs*bufferSize)));
1413 status = A_ERROR;
1414 break;
1415 }
1416
1417 temp = GetEndMarker();
1418
1419 status = HIFReadWrite(pDev->HIFDevice,
1420 SCRATCH_ADDRESS + 4,
1421 (A_UINT8 *)&temp,
1422 2,
1423 HIF_WR_SYNC_BYTE_INC,
1424 NULL);
1425
1426 if (status != A_OK) {
1427 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n"));
1428 break;
1429 }
1430
1431 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp));
1432
1433 temp = (A_UINT16)g_BlockSizes[1];
1434 /* convert to a mask */
1435 temp = temp - 1;
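        /* e.g. a 128-byte block size is reported to the target as the mask 0x007F via the
         * write to SCRATCH_ADDRESS + 6 below. */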
1436 status = HIFReadWrite(pDev->HIFDevice,
1437 SCRATCH_ADDRESS + 6,
1438 (A_UINT8 *)&temp,
1439 2,
1440 HIF_WR_SYNC_BYTE_INC,
1441 NULL);
1442
1443 if (status != A_OK) {
1444 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n"));
1445 break;
1446 }
1447
1448 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp));
1449
1450 /* execute the test on each mailbox */
1451 for (i = 0; i < AR6K_MAILBOXES; i++) {
1452 status = DoOneMboxHWTest(pDev, i);
1453 if (status != A_OK) {
1454 break;
1455 }
1456 }
1457
1458 } while (FALSE);
1459
1460 if (status == A_OK) {
1461 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \n"));
1462 } else {
1463 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n"));
1464 }
1465 /* don't let HTC_Start continue, the target is actually not running any HTC code */
1466 return A_ERROR;
1467}
1468#endif
1469
1470
1471