Commit | Line | Data |
---|---|---|
bdcd8170 KV |
1 | /* |
2 | * Copyright (c) 2007-2011 Atheros Communications Inc. | |
3 | * | |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
17 | #include "core.h" | |
18 | #include "target.h" | |
19 | #include "hif-ops.h" | |
20 | #include "htc_hif.h" | |
21 | #include "debug.h" | |
22 | ||
23 | #define MAILBOX_FOR_BLOCK_SIZE 1 | |
24 | ||
25 | #define ATH6KL_TIME_QUANTUM 10 /* in ms */ | |
26 | ||
bdcd8170 KV |
27 | static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) |
28 | { | |
29 | u8 *buf; | |
30 | int i; | |
31 | ||
32 | buf = req->virt_dma_buf; | |
33 | ||
34 | for (i = 0; i < req->scat_entries; i++) { | |
35 | ||
36 | if (from_dma) | |
37 | memcpy(req->scat_list[i].buf, buf, | |
38 | req->scat_list[i].len); | |
39 | else | |
40 | memcpy(buf, req->scat_list[i].buf, | |
41 | req->scat_list[i].len); | |
42 | ||
43 | buf += req->scat_list[i].len; | |
44 | } | |
45 | ||
46 | return 0; | |
47 | } | |
48 | ||
49 | int ath6kldev_rw_comp_handler(void *context, int status) | |
50 | { | |
51 | struct htc_packet *packet = context; | |
52 | ||
53 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | |
54 | "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n", | |
55 | packet, status); | |
56 | ||
57 | packet->status = status; | |
58 | packet->completion(packet->context, packet); | |
59 | ||
60 | return 0; | |
61 | } | |
62 | ||
63 | static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev) | |
64 | { | |
65 | u32 dummy; | |
66 | int status; | |
67 | ||
68 | ath6kl_err("target debug interrupt\n"); | |
69 | ||
70 | ath6kl_target_failure(dev->ar); | |
71 | ||
72 | /* | |
73 | * read counter to clear the interrupt, the debug error interrupt is | |
74 | * counter 0. | |
75 | */ | |
76 | status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, | |
77 | (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); | |
78 | if (status) | |
79 | WARN_ON(1); | |
80 | ||
81 | return status; | |
82 | } | |
83 | ||
/*
 * Mailbox recv message polling.
 *
 * Repeatedly reads the target's interrupt processing register table until
 * the HTC mailbox reports a pending message with a valid lookahead, or
 * @timeout (ms) expires. On success the lookahead word is returned through
 * @lk_ahd. Returns 0 on success, -ETIME on timeout, or the HIF error from
 * the register read. On timeout, a pending target assert (debug counter
 * interrupt) is also serviced.
 */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	/* poll in ATH6KL_TIME_QUANTUM (10 ms) steps */
	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little before re-reading the register table */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	/* i reaches 0 only when the loop exhausted all retries */
	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kldev_proc_dbg_intr(dev);
	}

	return status;
}
139 | ||
140 | /* | |
141 | * Disable packet reception (used in case the host runs out of buffers) | |
142 | * using the interrupt enable registers through the host I/F | |
143 | */ | |
144 | int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) | |
145 | { | |
146 | struct ath6kl_irq_enable_reg regs; | |
147 | int status = 0; | |
148 | ||
149 | /* take the lock to protect interrupt enable shadows */ | |
150 | spin_lock_bh(&dev->lock); | |
151 | ||
152 | if (enable_rx) | |
153 | dev->irq_en_reg.int_status_en |= | |
154 | SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); | |
155 | else | |
156 | dev->irq_en_reg.int_status_en &= | |
157 | ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); | |
158 | ||
159 | memcpy(®s, &dev->irq_en_reg, sizeof(regs)); | |
160 | ||
161 | spin_unlock_bh(&dev->lock); | |
162 | ||
163 | status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, | |
164 | ®s.int_status_en, | |
165 | sizeof(struct ath6kl_irq_enable_reg), | |
166 | HIF_WR_SYNC_BYTE_INC); | |
167 | ||
168 | return status; | |
169 | } | |
170 | ||
bdcd8170 KV |
/*
 * Submit a scatter request to the HIF layer.
 *
 * Reads are issued synchronously against the base mailbox address; writes
 * are issued asynchronously and use the extended mailbox address when the
 * transfer exceeds the mailbox width. For virtual-scatter writes the
 * scatter entries are first flattened into the bounce buffer; if that
 * fails, the request's completion callback is invoked with the error and
 * 0 is returned (the error travels via scat_req->status, not the return
 * value). For reads, the result (and any bounce-buffer copy-back status)
 * is recorded in scat_req->status.
 */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		/* wide transfers must target the extended mailbox address */
		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	/* virtual scatter write: flatten entries into the DMA bounce buffer */
	if (!read && scat_req->virt_scat) {
		status = ath6kldev_cp_scat_dma_buf(scat_req, false);
		if (status) {
			scat_req->status = status;
			scat_req->complete(dev->ar->htc_target, scat_req);
			return 0;
		}
	}

	status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && scat_req->virt_scat)
			/* copy the bounce buffer back into the entries */
			scat_req->status =
				ath6kldev_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
215 | ||
bdcd8170 KV |
216 | static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) |
217 | { | |
218 | u8 counter_int_status; | |
219 | ||
220 | ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n"); | |
221 | ||
222 | counter_int_status = dev->irq_proc_reg.counter_int_status & | |
223 | dev->irq_en_reg.cntr_int_status_en; | |
224 | ||
225 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
226 | "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n", | |
227 | counter_int_status); | |
228 | ||
229 | /* | |
230 | * NOTE: other modules like GMBOX may use the counter interrupt for | |
231 | * credit flow control on other counters, we only need to check for | |
232 | * the debug assertion counter interrupt. | |
233 | */ | |
234 | if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) | |
235 | return ath6kldev_proc_dbg_intr(dev); | |
236 | ||
237 | return 0; | |
238 | } | |
239 | ||
240 | static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) | |
241 | { | |
242 | int status; | |
243 | u8 error_int_status; | |
244 | u8 reg_buf[4]; | |
245 | ||
246 | ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n"); | |
247 | ||
248 | error_int_status = dev->irq_proc_reg.error_int_status & 0x0F; | |
249 | if (!error_int_status) { | |
250 | WARN_ON(1); | |
251 | return -EIO; | |
252 | } | |
253 | ||
254 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
255 | "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n", | |
256 | error_int_status); | |
257 | ||
258 | if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status)) | |
259 | ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n"); | |
260 | ||
261 | if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status)) | |
262 | ath6kl_err("rx underflow\n"); | |
263 | ||
264 | if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status)) | |
265 | ath6kl_err("tx overflow\n"); | |
266 | ||
267 | /* Clear the interrupt */ | |
268 | dev->irq_proc_reg.error_int_status &= ~error_int_status; | |
269 | ||
270 | /* set W1C value to clear the interrupt, this hits the register first */ | |
271 | reg_buf[0] = error_int_status; | |
272 | reg_buf[1] = 0; | |
273 | reg_buf[2] = 0; | |
274 | reg_buf[3] = 0; | |
275 | ||
276 | status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, | |
277 | reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); | |
278 | ||
279 | if (status) | |
280 | WARN_ON(1); | |
281 | ||
282 | return status; | |
283 | } | |
284 | ||
285 | static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) | |
286 | { | |
287 | int status; | |
288 | u8 cpu_int_status; | |
289 | u8 reg_buf[4]; | |
290 | ||
291 | ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n"); | |
292 | ||
293 | cpu_int_status = dev->irq_proc_reg.cpu_int_status & | |
294 | dev->irq_en_reg.cpu_int_status_en; | |
295 | if (!cpu_int_status) { | |
296 | WARN_ON(1); | |
297 | return -EIO; | |
298 | } | |
299 | ||
300 | ath6kl_dbg(ATH6KL_DBG_IRQ, | |
301 | "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", | |
302 | cpu_int_status); | |
303 | ||
304 | /* Clear the interrupt */ | |
305 | dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status; | |
306 | ||
307 | /* | |
308 | * Set up the register transfer buffer to hit the register 4 times , | |
309 | * this is done to make the access 4-byte aligned to mitigate issues | |
310 | * with host bus interconnects that restrict bus transfer lengths to | |
311 | * be a multiple of 4-bytes. | |
312 | */ | |
313 | ||
314 | /* set W1C value to clear the interrupt, this hits the register first */ | |
315 | reg_buf[0] = cpu_int_status; | |
316 | /* the remaining are set to zero which have no-effect */ | |
317 | reg_buf[1] = 0; | |
318 | reg_buf[2] = 0; | |
319 | reg_buf[3] = 0; | |
320 | ||
321 | status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, | |
322 | reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); | |
323 | ||
324 | if (status) | |
325 | WARN_ON(1); | |
326 | ||
327 | return status; | |
328 | } | |
329 | ||
/*
 * Process pending interrupts synchronously.
 *
 * Reads the target's interrupt status register table (when the shadow
 * enable register shows any source enabled), dispatches mailbox traffic
 * to the HTC RX handler via the lookahead word, and then services CPU,
 * error and counter interrupts in that order. Sets *done = true when no
 * further IRQ status re-check is needed. Returns 0 or a HIF/handler
 * error code.
 */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Process pending intr only when int_status_en is clear, it may
	 * result in unnecessary bus transaction otherwise. Target may be
	 * unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *  + sizeof(error_int_status) +
		 *  sizeof(counter_int_status) +
		 *  sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *  + sizeof(hole) + sizeof(rx_lkahd) +
		 *  sizeof(int_status_en) +
		 *  sizeof(cpu_int_status_en) +
		 *  sizeof(err_int_status_en) +
		 *  sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out pending mbox value, we use "lookAhead as
			 * the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending at all: tell the caller to stop re-checking */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = htc_rxmsg_pending_handler(dev->htc_cnxt,
						   &lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->htc_cnxt->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kldev_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kldev_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kldev_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	if (!dev->htc_cnxt->chk_irq_status_cnt)
		*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
480 | ||
481 | /* interrupt handler, kicks off all interrupt processing */ | |
482 | int ath6kldev_intr_bh_handler(struct ath6kl *ar) | |
483 | { | |
484 | struct ath6kl_device *dev = ar->htc_target->dev; | |
485 | int status = 0; | |
486 | bool done = false; | |
487 | ||
488 | /* | |
489 | * Reset counter used to flag a re-scan of IRQ status registers on | |
490 | * the target. | |
491 | */ | |
fcb82058 | 492 | dev->htc_cnxt->chk_irq_status_cnt = 0; |
bdcd8170 KV |
493 | |
494 | /* | |
495 | * IRQ processing is synchronous, interrupt status registers can be | |
496 | * re-read. | |
497 | */ | |
498 | while (!done) { | |
499 | status = proc_pending_irqs(dev, &done); | |
500 | if (status) | |
501 | break; | |
502 | } | |
503 | ||
504 | return status; | |
505 | } | |
506 | ||
507 | static int ath6kldev_enable_intrs(struct ath6kl_device *dev) | |
508 | { | |
509 | struct ath6kl_irq_enable_reg regs; | |
510 | int status; | |
511 | ||
512 | spin_lock_bh(&dev->lock); | |
513 | ||
514 | /* Enable all but ATH6KL CPU interrupts */ | |
515 | dev->irq_en_reg.int_status_en = | |
516 | SM(INT_STATUS_ENABLE_ERROR, 0x01) | | |
517 | SM(INT_STATUS_ENABLE_CPU, 0x01) | | |
518 | SM(INT_STATUS_ENABLE_COUNTER, 0x01); | |
519 | ||
520 | /* | |
521 | * NOTE: There are some cases where HIF can do detection of | |
522 | * pending mbox messages which is disabled now. | |
523 | */ | |
524 | dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); | |
525 | ||
526 | /* Set up the CPU Interrupt status Register */ | |
527 | dev->irq_en_reg.cpu_int_status_en = 0; | |
528 | ||
529 | /* Set up the Error Interrupt status Register */ | |
530 | dev->irq_en_reg.err_int_status_en = | |
531 | SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) | | |
532 | SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1); | |
533 | ||
534 | /* | |
535 | * Enable Counter interrupt status register to get fatal errors for | |
536 | * debugging. | |
537 | */ | |
538 | dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT, | |
539 | ATH6KL_TARGET_DEBUG_INTR_MASK); | |
540 | memcpy(®s, &dev->irq_en_reg, sizeof(regs)); | |
541 | ||
542 | spin_unlock_bh(&dev->lock); | |
543 | ||
544 | status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, | |
545 | ®s.int_status_en, sizeof(regs), | |
546 | HIF_WR_SYNC_BYTE_INC); | |
547 | ||
548 | if (status) | |
549 | ath6kl_err("failed to update interrupt ctl reg err: %d\n", | |
550 | status); | |
551 | ||
552 | return status; | |
553 | } | |
554 | ||
555 | int ath6kldev_disable_intrs(struct ath6kl_device *dev) | |
556 | { | |
557 | struct ath6kl_irq_enable_reg regs; | |
558 | ||
559 | spin_lock_bh(&dev->lock); | |
560 | /* Disable all interrupts */ | |
561 | dev->irq_en_reg.int_status_en = 0; | |
562 | dev->irq_en_reg.cpu_int_status_en = 0; | |
563 | dev->irq_en_reg.err_int_status_en = 0; | |
564 | dev->irq_en_reg.cntr_int_status_en = 0; | |
565 | memcpy(®s, &dev->irq_en_reg, sizeof(regs)); | |
566 | spin_unlock_bh(&dev->lock); | |
567 | ||
568 | return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, | |
569 | ®s.int_status_en, sizeof(regs), | |
570 | HIF_WR_SYNC_BYTE_INC); | |
571 | } | |
572 | ||
573 | /* enable device interrupts */ | |
574 | int ath6kldev_unmask_intrs(struct ath6kl_device *dev) | |
575 | { | |
576 | int status = 0; | |
577 | ||
578 | /* | |
579 | * Make sure interrupt are disabled before unmasking at the HIF | |
580 | * layer. The rationale here is that between device insertion | |
581 | * (where we clear the interrupts the first time) and when HTC | |
582 | * is finally ready to handle interrupts, other software can perform | |
583 | * target "soft" resets. The ATH6KL interrupt enables reset back to an | |
584 | * "enabled" state when this happens. | |
585 | */ | |
586 | ath6kldev_disable_intrs(dev); | |
587 | ||
588 | /* unmask the host controller interrupts */ | |
589 | ath6kl_hif_irq_enable(dev->ar); | |
590 | status = ath6kldev_enable_intrs(dev); | |
591 | ||
592 | return status; | |
593 | } | |
594 | ||
595 | /* disable all device interrupts */ | |
596 | int ath6kldev_mask_intrs(struct ath6kl_device *dev) | |
597 | { | |
598 | /* | |
599 | * Mask the interrupt at the HIF layer to avoid any stray interrupt | |
600 | * taken while we zero out our shadow registers in | |
601 | * ath6kldev_disable_intrs(). | |
602 | */ | |
603 | ath6kl_hif_irq_disable(dev->ar); | |
604 | ||
605 | return ath6kldev_disable_intrs(dev); | |
606 | } | |
607 | ||
608 | int ath6kldev_setup(struct ath6kl_device *dev) | |
609 | { | |
610 | int status = 0; | |
bdcd8170 | 611 | |
bdcd8170 KV |
612 | spin_lock_init(&dev->lock); |
613 | ||
bdcd8170 KV |
614 | /* |
615 | * NOTE: we actually get the block size of a mailbox other than 0, | |
616 | * for SDIO the block size on mailbox 0 is artificially set to 1. | |
617 | * So we use the block size that is set for the other 3 mailboxes. | |
618 | */ | |
5be8824f | 619 | dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size; |
bdcd8170 KV |
620 | |
621 | /* must be a power of 2 */ | |
5be8824f | 622 | if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { |
bdcd8170 KV |
623 | WARN_ON(1); |
624 | goto fail_setup; | |
625 | } | |
626 | ||
627 | /* assemble mask, used for padding to a block */ | |
5be8824f | 628 | dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; |
bdcd8170 KV |
629 | |
630 | ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", | |
5be8824f | 631 | dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); |
bdcd8170 KV |
632 | |
633 | ath6kl_dbg(ATH6KL_DBG_TRC, | |
634 | "hif interrupt processing is sync only\n"); | |
635 | ||
636 | status = ath6kldev_disable_intrs(dev); | |
637 | ||
638 | fail_setup: | |
639 | return status; | |
640 | ||
641 | } |