Commit | Line | Data |
---|---|---|
bdcd8170 KV |
1 | /* |
2 | * Copyright (c) 2007-2011 Atheros Communications Inc. | |
3 | * | |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
17 | #include "core.h" | |
18 | #include "target.h" | |
19 | #include "hif-ops.h" | |
20 | #include "htc_hif.h" | |
21 | #include "debug.h" | |
22 | ||
23 | #define MAILBOX_FOR_BLOCK_SIZE 1 | |
24 | ||
25 | #define ATH6KL_TIME_QUANTUM 10 /* in ms */ | |
26 | ||
bdcd8170 KV |
27 | static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) |
28 | { | |
29 | u8 *buf; | |
30 | int i; | |
31 | ||
32 | buf = req->virt_dma_buf; | |
33 | ||
34 | for (i = 0; i < req->scat_entries; i++) { | |
35 | ||
36 | if (from_dma) | |
37 | memcpy(req->scat_list[i].buf, buf, | |
38 | req->scat_list[i].len); | |
39 | else | |
40 | memcpy(buf, req->scat_list[i].buf, | |
41 | req->scat_list[i].len); | |
42 | ||
43 | buf += req->scat_list[i].len; | |
44 | } | |
45 | ||
46 | return 0; | |
47 | } | |
48 | ||
49 | int ath6kldev_rw_comp_handler(void *context, int status) | |
50 | { | |
51 | struct htc_packet *packet = context; | |
52 | ||
53 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | |
54 | "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n", | |
55 | packet, status); | |
56 | ||
57 | packet->status = status; | |
58 | packet->completion(packet->context, packet); | |
59 | ||
60 | return 0; | |
61 | } | |
62 | ||
/*
 * Handle a target debug (assert) interrupt: report the failure to the
 * upper layer, then clear the interrupt by reading the decrementing
 * counter register.  Returns the status of the clearing register access.
 */
static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
{
	u32 dummy;
	int status;

	ath6kl_err("target debug interrupt\n");

	ath6kl_target_failure(dev->ar);

	/*
	 * read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
				     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
	if (status)
		WARN_ON(1);

	return status;
}
83 | ||
/*
 * Mailbox recv message polling: busy-poll in ATH6KL_TIME_QUANTUM (ms)
 * steps, up to 'timeout' ms total, until the HTC mailbox reports a
 * message with a valid lookahead.  On success the lookahead is returned
 * through *lk_ahd; on expiry returns -ETIME (and runs the debug-interrupt
 * handler if the target asserted meanwhile).
 */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little before re-reading the register table */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	/* i == 0 means the loop ran to exhaustion without a valid lookahead */
	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kldev_proc_dbg_intr(dev);
	}

	return status;
}
139 | ||
140 | /* | |
141 | * Disable packet reception (used in case the host runs out of buffers) | |
142 | * using the interrupt enable registers through the host I/F | |
143 | */ | |
144 | int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) | |
145 | { | |
146 | struct ath6kl_irq_enable_reg regs; | |
147 | int status = 0; | |
148 | ||
149 | /* take the lock to protect interrupt enable shadows */ | |
150 | spin_lock_bh(&dev->lock); | |
151 | ||
152 | if (enable_rx) | |
153 | dev->irq_en_reg.int_status_en |= | |
154 | SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); | |
155 | else | |
156 | dev->irq_en_reg.int_status_en &= | |
157 | ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); | |
158 | ||
159 | memcpy(®s, &dev->irq_en_reg, sizeof(regs)); | |
160 | ||
161 | spin_unlock_bh(&dev->lock); | |
162 | ||
163 | status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, | |
164 | ®s.int_status_en, | |
165 | sizeof(struct ath6kl_irq_enable_reg), | |
166 | HIF_WR_SYNC_BYTE_INC); | |
167 | ||
168 | return status; | |
169 | } | |
170 | ||
bdcd8170 KV |
/*
 * Submit a scatter request over the HTC mailbox.  Reads are issued
 * synchronously (block-fixed address); writes are issued asynchronously
 * and, on early failure, are completed via scat_req->complete so the
 * caller's async completion path still runs.
 */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		/* large transfers go through the extended mailbox address */
		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	/* virtual scatter write: gather entries into the bounce buffer first */
	if (!read && scat_req->virt_scat)
		status = ath6kldev_cp_scat_dma_buf(scat_req, false);

	if (status) {
		if (!read) {
			/* async path: deliver the failure via the callback */
			scat_req->status = status;
			scat_req->complete(dev->ar->htc_target, scat_req);
			return 0;
		}
		return status;
	}

	status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && scat_req->virt_scat)
			/* scatter the bounce buffer back out to the entries */
			scat_req->status =
				ath6kldev_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
218 | ||
bdcd8170 KV |
/*
 * Enable HIF-level scatter/gather support so multiple HTC messages can be
 * bundled into one bus transfer.  NOTE(review): max_msg_per_trans is
 * currently unused here; the HIF layer reports its limits back through
 * dev->hif_scat_info — confirm whether the parameter should be plumbed in.
 */
int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_trans)
{
	return ath6kl_hif_enable_scatter(dev->ar, &dev->hif_scat_info);
}
223 | ||
224 | static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) | |
225 | { | |
226 | u8 counter_int_status; | |
227 | ||
228 | ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n"); | |
229 | ||
230 | counter_int_status = dev->irq_proc_reg.counter_int_status & | |
231 | dev->irq_en_reg.cntr_int_status_en; | |
232 | ||
233 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
234 | "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n", | |
235 | counter_int_status); | |
236 | ||
237 | /* | |
238 | * NOTE: other modules like GMBOX may use the counter interrupt for | |
239 | * credit flow control on other counters, we only need to check for | |
240 | * the debug assertion counter interrupt. | |
241 | */ | |
242 | if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) | |
243 | return ath6kldev_proc_dbg_intr(dev); | |
244 | ||
245 | return 0; | |
246 | } | |
247 | ||
248 | static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) | |
249 | { | |
250 | int status; | |
251 | u8 error_int_status; | |
252 | u8 reg_buf[4]; | |
253 | ||
254 | ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n"); | |
255 | ||
256 | error_int_status = dev->irq_proc_reg.error_int_status & 0x0F; | |
257 | if (!error_int_status) { | |
258 | WARN_ON(1); | |
259 | return -EIO; | |
260 | } | |
261 | ||
262 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
263 | "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n", | |
264 | error_int_status); | |
265 | ||
266 | if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status)) | |
267 | ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n"); | |
268 | ||
269 | if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status)) | |
270 | ath6kl_err("rx underflow\n"); | |
271 | ||
272 | if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status)) | |
273 | ath6kl_err("tx overflow\n"); | |
274 | ||
275 | /* Clear the interrupt */ | |
276 | dev->irq_proc_reg.error_int_status &= ~error_int_status; | |
277 | ||
278 | /* set W1C value to clear the interrupt, this hits the register first */ | |
279 | reg_buf[0] = error_int_status; | |
280 | reg_buf[1] = 0; | |
281 | reg_buf[2] = 0; | |
282 | reg_buf[3] = 0; | |
283 | ||
284 | status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, | |
285 | reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); | |
286 | ||
287 | if (status) | |
288 | WARN_ON(1); | |
289 | ||
290 | return status; | |
291 | } | |
292 | ||
293 | static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) | |
294 | { | |
295 | int status; | |
296 | u8 cpu_int_status; | |
297 | u8 reg_buf[4]; | |
298 | ||
299 | ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n"); | |
300 | ||
301 | cpu_int_status = dev->irq_proc_reg.cpu_int_status & | |
302 | dev->irq_en_reg.cpu_int_status_en; | |
303 | if (!cpu_int_status) { | |
304 | WARN_ON(1); | |
305 | return -EIO; | |
306 | } | |
307 | ||
308 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
309 | "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", | |
310 | cpu_int_status); | |
311 | ||
312 | /* Clear the interrupt */ | |
313 | dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status; | |
314 | ||
315 | /* | |
316 | * Set up the register transfer buffer to hit the register 4 times , | |
317 | * this is done to make the access 4-byte aligned to mitigate issues | |
318 | * with host bus interconnects that restrict bus transfer lengths to | |
319 | * be a multiple of 4-bytes. | |
320 | */ | |
321 | ||
322 | /* set W1C value to clear the interrupt, this hits the register first */ | |
323 | reg_buf[0] = cpu_int_status; | |
324 | /* the remaining are set to zero which have no-effect */ | |
325 | reg_buf[1] = 0; | |
326 | reg_buf[2] = 0; | |
327 | reg_buf[3] = 0; | |
328 | ||
329 | status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, | |
330 | reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); | |
331 | ||
332 | if (status) | |
333 | WARN_ON(1); | |
334 | ||
335 | return status; | |
336 | } | |
337 | ||
/*
 * Process pending interrupts synchronously.  Reads the target's interrupt
 * status register table, drains any pending mailbox message via the HTC
 * msg_pending callback, then dispatches CPU / error / counter interrupts
 * to their handlers.  *done is set when no further processing is needed;
 * in the current implementation it is always forced true before return
 * (see the optimization note at the bottom).
 */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Process pending intr only when int_status_en is clear, it may
	 * result in unnecessary bus transaction otherwise. Target may be
	 * unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *  + sizeof(error_int_status) +
		 *  sizeof(counter_int_status) +
		 *  sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *  + sizeof(hole) + sizeof(rx_lkahd) +
		 *  sizeof(int_status_en) +
		 *  sizeof(cpu_int_status_en) +
		 *  sizeof(err_int_status_en) +
		 *  sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out pending mbox value, we use "lookahead"
			 * as the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending: neither an interrupt source nor a mbox message */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kldev_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kldev_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kldev_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	/* unconditionally stop the caller's re-check loop (see note above) */
	*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
486 | ||
487 | /* interrupt handler, kicks off all interrupt processing */ | |
488 | int ath6kldev_intr_bh_handler(struct ath6kl *ar) | |
489 | { | |
490 | struct ath6kl_device *dev = ar->htc_target->dev; | |
491 | int status = 0; | |
492 | bool done = false; | |
493 | ||
494 | /* | |
495 | * Reset counter used to flag a re-scan of IRQ status registers on | |
496 | * the target. | |
497 | */ | |
498 | dev->chk_irq_status_cnt = 0; | |
499 | ||
500 | /* | |
501 | * IRQ processing is synchronous, interrupt status registers can be | |
502 | * re-read. | |
503 | */ | |
504 | while (!done) { | |
505 | status = proc_pending_irqs(dev, &done); | |
506 | if (status) | |
507 | break; | |
508 | } | |
509 | ||
510 | return status; | |
511 | } | |
512 | ||
/*
 * Build the interrupt-enable shadow (error, CPU, counter, and mbox-data
 * sources) under the lock, then write it out to the target's
 * INT_STATUS_ENABLE register block in one transfer.
 */
static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
{
	struct ath6kl_irq_enable_reg regs;
	int status;

	spin_lock_bh(&dev->lock);

	/* Enable all but ATH6KL CPU interrupts */
	dev->irq_en_reg.int_status_en =
			SM(INT_STATUS_ENABLE_ERROR, 0x01) |
			SM(INT_STATUS_ENABLE_CPU, 0x01) |
			SM(INT_STATUS_ENABLE_COUNTER, 0x01);

	/*
	 * NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);

	/* Set up the CPU Interrupt status Register (no CPU sources enabled) */
	dev->irq_en_reg.cpu_int_status_en = 0;

	/* Set up the Error Interrupt status Register */
	dev->irq_en_reg.err_int_status_en =
		SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
		SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);

	/*
	 * Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
						ATH6KL_TARGET_DEBUG_INTR_MASK);
	/* snapshot so the bus write happens outside the lock */
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));

	spin_unlock_bh(&dev->lock);

	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				     &regs.int_status_en, sizeof(regs),
				     HIF_WR_SYNC_BYTE_INC);

	if (status)
		ath6kl_err("failed to update interrupt ctl reg err: %d\n",
			   status);

	return status;
}
560 | ||
/*
 * Zero the entire interrupt-enable shadow under the lock, then write the
 * zeroed block to the target so every interrupt source is disabled.
 */
int ath6kldev_disable_intrs(struct ath6kl_device *dev)
{
	struct ath6kl_irq_enable_reg regs;

	spin_lock_bh(&dev->lock);
	/* Disable all interrupts */
	dev->irq_en_reg.int_status_en = 0;
	dev->irq_en_reg.cpu_int_status_en = 0;
	dev->irq_en_reg.err_int_status_en = 0;
	dev->irq_en_reg.cntr_int_status_en = 0;
	/* snapshot so the bus write happens outside the lock */
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
	spin_unlock_bh(&dev->lock);

	return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				   &regs.int_status_en, sizeof(regs),
				   HIF_WR_SYNC_BYTE_INC);
}
578 | ||
579 | /* enable device interrupts */ | |
580 | int ath6kldev_unmask_intrs(struct ath6kl_device *dev) | |
581 | { | |
582 | int status = 0; | |
583 | ||
584 | /* | |
585 | * Make sure interrupt are disabled before unmasking at the HIF | |
586 | * layer. The rationale here is that between device insertion | |
587 | * (where we clear the interrupts the first time) and when HTC | |
588 | * is finally ready to handle interrupts, other software can perform | |
589 | * target "soft" resets. The ATH6KL interrupt enables reset back to an | |
590 | * "enabled" state when this happens. | |
591 | */ | |
592 | ath6kldev_disable_intrs(dev); | |
593 | ||
594 | /* unmask the host controller interrupts */ | |
595 | ath6kl_hif_irq_enable(dev->ar); | |
596 | status = ath6kldev_enable_intrs(dev); | |
597 | ||
598 | return status; | |
599 | } | |
600 | ||
/*
 * Disable all device interrupts.  Returns the status of the shadow
 * register write performed by ath6kldev_disable_intrs().
 */
int ath6kldev_mask_intrs(struct ath6kl_device *dev)
{
	/*
	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
	 * taken while we zero out our shadow registers in
	 * ath6kldev_disable_intrs().
	 */
	ath6kl_hif_irq_disable(dev->ar);

	return ath6kldev_disable_intrs(dev);
}
613 | ||
614 | int ath6kldev_setup(struct ath6kl_device *dev) | |
615 | { | |
616 | int status = 0; | |
bdcd8170 | 617 | |
bdcd8170 KV |
618 | spin_lock_init(&dev->lock); |
619 | ||
bdcd8170 KV |
620 | /* |
621 | * NOTE: we actually get the block size of a mailbox other than 0, | |
622 | * for SDIO the block size on mailbox 0 is artificially set to 1. | |
623 | * So we use the block size that is set for the other 3 mailboxes. | |
624 | */ | |
625 | dev->block_sz = dev->ar->mbox_info.block_size; | |
626 | ||
627 | /* must be a power of 2 */ | |
628 | if ((dev->block_sz & (dev->block_sz - 1)) != 0) { | |
629 | WARN_ON(1); | |
630 | goto fail_setup; | |
631 | } | |
632 | ||
633 | /* assemble mask, used for padding to a block */ | |
634 | dev->block_mask = dev->block_sz - 1; | |
635 | ||
636 | ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", | |
637 | dev->block_sz, dev->ar->mbox_info.htc_addr); | |
638 | ||
639 | ath6kl_dbg(ATH6KL_DBG_TRC, | |
640 | "hif interrupt processing is sync only\n"); | |
641 | ||
642 | status = ath6kldev_disable_intrs(dev); | |
643 | ||
644 | fail_setup: | |
645 | return status; | |
646 | ||
647 | } |