/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
23 #define MAILBOX_FOR_BLOCK_SIZE 1
25 #define ATH6KL_TIME_QUANTUM 10 /* in ms */
27 static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req
*req
, bool from_dma
)
32 buf
= req
->virt_dma_buf
;
34 for (i
= 0; i
< req
->scat_entries
; i
++) {
37 memcpy(req
->scat_list
[i
].buf
, buf
,
38 req
->scat_list
[i
].len
);
40 memcpy(buf
, req
->scat_list
[i
].buf
,
41 req
->scat_list
[i
].len
);
43 buf
+= req
->scat_list
[i
].len
;
49 int ath6kldev_rw_comp_handler(void *context
, int status
)
51 struct htc_packet
*packet
= context
;
53 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
,
54 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
57 packet
->status
= status
;
58 packet
->completion(packet
->context
, packet
);
63 static int ath6kldev_proc_dbg_intr(struct ath6kl_device
*dev
)
68 ath6kl_err("target debug interrupt\n");
70 ath6kl_target_failure(dev
->ar
);
73 * read counter to clear the interrupt, the debug error interrupt is
76 status
= hif_read_write_sync(dev
->ar
, COUNT_DEC_ADDRESS
,
77 (u8
*)&dummy
, 4, HIF_RD_SYNC_BYTE_INC
);
84 /* mailbox recv message polling */
85 int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device
*dev
, u32
*lk_ahd
,
88 struct ath6kl_irq_proc_registers
*rg
;
90 u8 htc_mbox
= 1 << HTC_MAILBOX
;
92 for (i
= timeout
/ ATH6KL_TIME_QUANTUM
; i
> 0; i
--) {
93 /* this is the standard HIF way, load the reg table */
94 status
= hif_read_write_sync(dev
->ar
, HOST_INT_STATUS_ADDRESS
,
95 (u8
*) &dev
->irq_proc_reg
,
96 sizeof(dev
->irq_proc_reg
),
97 HIF_RD_SYNC_BYTE_INC
);
100 ath6kl_err("failed to read reg table\n");
104 /* check for MBOX data and valid lookahead */
105 if (dev
->irq_proc_reg
.host_int_status
& htc_mbox
) {
106 if (dev
->irq_proc_reg
.rx_lkahd_valid
&
109 * Mailbox has a message and the look ahead
112 rg
= &dev
->irq_proc_reg
;
114 le32_to_cpu(rg
->rx_lkahd
[HTC_MAILBOX
]);
120 mdelay(ATH6KL_TIME_QUANTUM
);
121 ath6kl_dbg(ATH6KL_DBG_HTC_RECV
, "retry mbox poll : %d\n", i
);
125 ath6kl_err("timeout waiting for recv message\n");
127 /* check if the target asserted */
128 if (dev
->irq_proc_reg
.counter_int_status
&
129 ATH6KL_TARGET_DEBUG_INTR_MASK
)
131 * Target failure handler will be called in case of
134 ath6kldev_proc_dbg_intr(dev
);
141 * Disable packet reception (used in case the host runs out of buffers)
142 * using the interrupt enable registers through the host I/F
144 int ath6kldev_rx_control(struct ath6kl_device
*dev
, bool enable_rx
)
146 struct ath6kl_irq_enable_reg regs
;
149 /* take the lock to protect interrupt enable shadows */
150 spin_lock_bh(&dev
->lock
);
153 dev
->irq_en_reg
.int_status_en
|=
154 SM(INT_STATUS_ENABLE_MBOX_DATA
, 0x01);
156 dev
->irq_en_reg
.int_status_en
&=
157 ~SM(INT_STATUS_ENABLE_MBOX_DATA
, 0x01);
159 memcpy(®s
, &dev
->irq_en_reg
, sizeof(regs
));
161 spin_unlock_bh(&dev
->lock
);
163 status
= hif_read_write_sync(dev
->ar
, INT_STATUS_ENABLE_ADDRESS
,
165 sizeof(struct ath6kl_irq_enable_reg
),
166 HIF_WR_SYNC_BYTE_INC
);
171 int ath6kldev_submit_scat_req(struct ath6kl_device
*dev
,
172 struct hif_scatter_req
*scat_req
, bool read
)
177 scat_req
->req
= HIF_RD_SYNC_BLOCK_FIX
;
178 scat_req
->addr
= dev
->ar
->mbox_info
.htc_addr
;
180 scat_req
->req
= HIF_WR_ASYNC_BLOCK_INC
;
183 (scat_req
->len
> HIF_MBOX_WIDTH
) ?
184 dev
->ar
->mbox_info
.htc_ext_addr
:
185 dev
->ar
->mbox_info
.htc_addr
;
188 ath6kl_dbg((ATH6KL_DBG_HTC_RECV
| ATH6KL_DBG_HTC_SEND
),
189 "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
190 scat_req
->scat_entries
, scat_req
->len
,
191 scat_req
->addr
, !read
? "async" : "sync",
192 (read
) ? "rd" : "wr");
194 if (!read
&& scat_req
->virt_scat
) {
195 status
= ath6kldev_cp_scat_dma_buf(scat_req
, false);
197 scat_req
->status
= status
;
198 scat_req
->complete(dev
->ar
->htc_target
, scat_req
);
203 status
= ath6kl_hif_scat_req_rw(dev
->ar
, scat_req
);
206 /* in sync mode, we can touch the scatter request */
207 scat_req
->status
= status
;
208 if (!status
&& scat_req
->virt_scat
)
210 ath6kldev_cp_scat_dma_buf(scat_req
, true);
216 static int ath6kldev_proc_counter_intr(struct ath6kl_device
*dev
)
218 u8 counter_int_status
;
220 ath6kl_dbg(ATH6KL_DBG_IRQ
, "counter interrupt\n");
222 counter_int_status
= dev
->irq_proc_reg
.counter_int_status
&
223 dev
->irq_en_reg
.cntr_int_status_en
;
225 ath6kl_dbg(ATH6KL_DBG_IRQ
,
226 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
230 * NOTE: other modules like GMBOX may use the counter interrupt for
231 * credit flow control on other counters, we only need to check for
232 * the debug assertion counter interrupt.
234 if (counter_int_status
& ATH6KL_TARGET_DEBUG_INTR_MASK
)
235 return ath6kldev_proc_dbg_intr(dev
);
240 static int ath6kldev_proc_err_intr(struct ath6kl_device
*dev
)
246 ath6kl_dbg(ATH6KL_DBG_IRQ
, "error interrupt\n");
248 error_int_status
= dev
->irq_proc_reg
.error_int_status
& 0x0F;
249 if (!error_int_status
) {
254 ath6kl_dbg(ATH6KL_DBG_IRQ
,
255 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
258 if (MS(ERROR_INT_STATUS_WAKEUP
, error_int_status
))
259 ath6kl_dbg(ATH6KL_DBG_IRQ
, "error : wakeup\n");
261 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW
, error_int_status
))
262 ath6kl_err("rx underflow\n");
264 if (MS(ERROR_INT_STATUS_TX_OVERFLOW
, error_int_status
))
265 ath6kl_err("tx overflow\n");
267 /* Clear the interrupt */
268 dev
->irq_proc_reg
.error_int_status
&= ~error_int_status
;
270 /* set W1C value to clear the interrupt, this hits the register first */
271 reg_buf
[0] = error_int_status
;
276 status
= hif_read_write_sync(dev
->ar
, ERROR_INT_STATUS_ADDRESS
,
277 reg_buf
, 4, HIF_WR_SYNC_BYTE_FIX
);
285 static int ath6kldev_proc_cpu_intr(struct ath6kl_device
*dev
)
291 ath6kl_dbg(ATH6KL_DBG_IRQ
, "cpu interrupt\n");
293 cpu_int_status
= dev
->irq_proc_reg
.cpu_int_status
&
294 dev
->irq_en_reg
.cpu_int_status_en
;
295 if (!cpu_int_status
) {
300 ath6kl_dbg(ATH6KL_DBG_IRQ
,
301 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
304 /* Clear the interrupt */
305 dev
->irq_proc_reg
.cpu_int_status
&= ~cpu_int_status
;
308 * Set up the register transfer buffer to hit the register 4 times ,
309 * this is done to make the access 4-byte aligned to mitigate issues
310 * with host bus interconnects that restrict bus transfer lengths to
311 * be a multiple of 4-bytes.
314 /* set W1C value to clear the interrupt, this hits the register first */
315 reg_buf
[0] = cpu_int_status
;
316 /* the remaining are set to zero which have no-effect */
321 status
= hif_read_write_sync(dev
->ar
, CPU_INT_STATUS_ADDRESS
,
322 reg_buf
, 4, HIF_WR_SYNC_BYTE_FIX
);
330 /* process pending interrupts synchronously */
331 static int proc_pending_irqs(struct ath6kl_device
*dev
, bool *done
)
333 struct ath6kl_irq_proc_registers
*rg
;
335 u8 host_int_status
= 0;
337 u8 htc_mbox
= 1 << HTC_MAILBOX
;
339 ath6kl_dbg(ATH6KL_DBG_IRQ
, "proc_pending_irqs: (dev: 0x%p)\n", dev
);
342 * NOTE: HIF implementation guarantees that the context of this
343 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
344 * sleep or call any API that can block or switch thread/task
345 * contexts. This is a fully schedulable context.
349 * Process pending intr only when int_status_en is clear, it may
350 * result in unnecessary bus transaction otherwise. Target may be
351 * unresponsive at the time.
353 if (dev
->irq_en_reg
.int_status_en
) {
355 * Read the first 28 bytes of the HTC register table. This
356 * will yield us the value of different int status
357 * registers and the lookahead registers.
359 * length = sizeof(int_status) + sizeof(cpu_int_status)
360 * + sizeof(error_int_status) +
361 * sizeof(counter_int_status) +
362 * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
363 * + sizeof(hole) + sizeof(rx_lkahd) +
364 * sizeof(int_status_en) +
365 * sizeof(cpu_int_status_en) +
366 * sizeof(err_int_status_en) +
367 * sizeof(cntr_int_status_en);
369 status
= hif_read_write_sync(dev
->ar
, HOST_INT_STATUS_ADDRESS
,
370 (u8
*) &dev
->irq_proc_reg
,
371 sizeof(dev
->irq_proc_reg
),
372 HIF_RD_SYNC_BYTE_INC
);
376 if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ
))
377 ath6kl_dump_registers(dev
, &dev
->irq_proc_reg
,
380 /* Update only those registers that are enabled */
381 host_int_status
= dev
->irq_proc_reg
.host_int_status
&
382 dev
->irq_en_reg
.int_status_en
;
384 /* Look at mbox status */
385 if (host_int_status
& htc_mbox
) {
387 * Mask out pending mbox value, we use "lookAhead as
388 * the real flag for mbox processing.
390 host_int_status
&= ~htc_mbox
;
391 if (dev
->irq_proc_reg
.rx_lkahd_valid
&
393 rg
= &dev
->irq_proc_reg
;
394 lk_ahd
= le32_to_cpu(rg
->rx_lkahd
[HTC_MAILBOX
]);
396 ath6kl_err("lookAhead is zero!\n");
401 if (!host_int_status
&& !lk_ahd
) {
409 ath6kl_dbg(ATH6KL_DBG_IRQ
,
410 "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd
);
412 * Mailbox Interrupt, the HTC layer may issue async
413 * requests to empty the mailbox. When emptying the recv
414 * mailbox we use the async handler above called from the
415 * completion routine of the callers read request. This can
416 * improve performance by reducing context switching when
417 * we rapidly pull packets.
419 status
= htc_rxmsg_pending_handler(dev
->htc_cnxt
,
426 * HTC could not pull any messages out due to lack
429 dev
->chk_irq_status_cnt
= 0;
432 /* now handle the rest of them */
433 ath6kl_dbg(ATH6KL_DBG_IRQ
,
434 "valid interrupt source(s) for other interrupts: 0x%x\n",
437 if (MS(HOST_INT_STATUS_CPU
, host_int_status
)) {
439 status
= ath6kldev_proc_cpu_intr(dev
);
444 if (MS(HOST_INT_STATUS_ERROR
, host_int_status
)) {
445 /* Error Interrupt */
446 status
= ath6kldev_proc_err_intr(dev
);
451 if (MS(HOST_INT_STATUS_COUNTER
, host_int_status
))
452 /* Counter Interrupt */
453 status
= ath6kldev_proc_counter_intr(dev
);
457 * An optimization to bypass reading the IRQ status registers
458 * unecessarily which can re-wake the target, if upper layers
459 * determine that we are in a low-throughput mode, we can rely on
460 * taking another interrupt rather than re-checking the status
461 * registers which can re-wake the target.
463 * NOTE : for host interfaces that makes use of detecting pending
464 * mbox messages at hif can not use this optimization due to
465 * possible side effects, SPI requires the host to drain all
466 * messages from the mailbox before exiting the ISR routine.
469 ath6kl_dbg(ATH6KL_DBG_IRQ
,
470 "bypassing irq status re-check, forcing done\n");
472 if (!dev
->chk_irq_status_cnt
)
475 ath6kl_dbg(ATH6KL_DBG_IRQ
,
476 "proc_pending_irqs: (done:%d, status=%d\n", *done
, status
);
481 /* interrupt handler, kicks off all interrupt processing */
482 int ath6kldev_intr_bh_handler(struct ath6kl
*ar
)
484 struct ath6kl_device
*dev
= ar
->htc_target
->dev
;
489 * Reset counter used to flag a re-scan of IRQ status registers on
492 dev
->chk_irq_status_cnt
= 0;
495 * IRQ processing is synchronous, interrupt status registers can be
499 status
= proc_pending_irqs(dev
, &done
);
507 static int ath6kldev_enable_intrs(struct ath6kl_device
*dev
)
509 struct ath6kl_irq_enable_reg regs
;
512 spin_lock_bh(&dev
->lock
);
514 /* Enable all but ATH6KL CPU interrupts */
515 dev
->irq_en_reg
.int_status_en
=
516 SM(INT_STATUS_ENABLE_ERROR
, 0x01) |
517 SM(INT_STATUS_ENABLE_CPU
, 0x01) |
518 SM(INT_STATUS_ENABLE_COUNTER
, 0x01);
521 * NOTE: There are some cases where HIF can do detection of
522 * pending mbox messages which is disabled now.
524 dev
->irq_en_reg
.int_status_en
|= SM(INT_STATUS_ENABLE_MBOX_DATA
, 0x01);
526 /* Set up the CPU Interrupt status Register */
527 dev
->irq_en_reg
.cpu_int_status_en
= 0;
529 /* Set up the Error Interrupt status Register */
530 dev
->irq_en_reg
.err_int_status_en
=
531 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW
, 0x01) |
532 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW
, 0x1);
535 * Enable Counter interrupt status register to get fatal errors for
538 dev
->irq_en_reg
.cntr_int_status_en
= SM(COUNTER_INT_STATUS_ENABLE_BIT
,
539 ATH6KL_TARGET_DEBUG_INTR_MASK
);
540 memcpy(®s
, &dev
->irq_en_reg
, sizeof(regs
));
542 spin_unlock_bh(&dev
->lock
);
544 status
= hif_read_write_sync(dev
->ar
, INT_STATUS_ENABLE_ADDRESS
,
545 ®s
.int_status_en
, sizeof(regs
),
546 HIF_WR_SYNC_BYTE_INC
);
549 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
555 int ath6kldev_disable_intrs(struct ath6kl_device
*dev
)
557 struct ath6kl_irq_enable_reg regs
;
559 spin_lock_bh(&dev
->lock
);
560 /* Disable all interrupts */
561 dev
->irq_en_reg
.int_status_en
= 0;
562 dev
->irq_en_reg
.cpu_int_status_en
= 0;
563 dev
->irq_en_reg
.err_int_status_en
= 0;
564 dev
->irq_en_reg
.cntr_int_status_en
= 0;
565 memcpy(®s
, &dev
->irq_en_reg
, sizeof(regs
));
566 spin_unlock_bh(&dev
->lock
);
568 return hif_read_write_sync(dev
->ar
, INT_STATUS_ENABLE_ADDRESS
,
569 ®s
.int_status_en
, sizeof(regs
),
570 HIF_WR_SYNC_BYTE_INC
);
573 /* enable device interrupts */
574 int ath6kldev_unmask_intrs(struct ath6kl_device
*dev
)
579 * Make sure interrupt are disabled before unmasking at the HIF
580 * layer. The rationale here is that between device insertion
581 * (where we clear the interrupts the first time) and when HTC
582 * is finally ready to handle interrupts, other software can perform
583 * target "soft" resets. The ATH6KL interrupt enables reset back to an
584 * "enabled" state when this happens.
586 ath6kldev_disable_intrs(dev
);
588 /* unmask the host controller interrupts */
589 ath6kl_hif_irq_enable(dev
->ar
);
590 status
= ath6kldev_enable_intrs(dev
);
595 /* disable all device interrupts */
596 int ath6kldev_mask_intrs(struct ath6kl_device
*dev
)
599 * Mask the interrupt at the HIF layer to avoid any stray interrupt
600 * taken while we zero out our shadow registers in
601 * ath6kldev_disable_intrs().
603 ath6kl_hif_irq_disable(dev
->ar
);
605 return ath6kldev_disable_intrs(dev
);
608 int ath6kldev_setup(struct ath6kl_device
*dev
)
612 spin_lock_init(&dev
->lock
);
615 * NOTE: we actually get the block size of a mailbox other than 0,
616 * for SDIO the block size on mailbox 0 is artificially set to 1.
617 * So we use the block size that is set for the other 3 mailboxes.
619 dev
->htc_cnxt
->block_sz
= dev
->ar
->mbox_info
.block_size
;
621 /* must be a power of 2 */
622 if ((dev
->htc_cnxt
->block_sz
& (dev
->htc_cnxt
->block_sz
- 1)) != 0) {
627 /* assemble mask, used for padding to a block */
628 dev
->htc_cnxt
->block_mask
= dev
->htc_cnxt
->block_sz
- 1;
630 ath6kl_dbg(ATH6KL_DBG_TRC
, "block size: %d, mbox addr:0x%X\n",
631 dev
->htc_cnxt
->block_sz
, dev
->ar
->mbox_info
.htc_addr
);
633 ath6kl_dbg(ATH6KL_DBG_TRC
,
634 "hif interrupt processing is sync only\n");
636 status
= ath6kldev_disable_intrs(dev
);