/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	struct ath_wiphy *aphy = ah->hw->priv;
	struct ath_softc *sc = aphy->sc;

	TX_STAT_INC(q, puttxbuf);
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	struct ath_wiphy *aphy = ah->hw->priv;
	struct ath_softc *sc = aphy->sc;

	TX_STAT_INC(q, txstart);
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is 128 bytes, the
 * trigger level cannot exceed 6 * 64 = 384 bytes. This is because there
 * needs to be enough space in the TX FIFO for the requested transfer
 * size, so the TX FIFO stops filling at 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Stopping TX DMA, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Stopping TX DMA, inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_dbg(common, ATH_DBG_QUEUE,
				"TSF has moved while trying to set quiet time TSF: 0x%08x\n",
				tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_err(common,
					"Failed to stop TX DMA in 100 msec after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for the beacon queue,
		 * but not for IBSS, as that would create an imbalance
		 * in beaconing fairness among participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way.
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;

		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);
void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
			  AR_INTR_SYNC_DEFAULT);
		REG_WRITE(ah, AR_INTR_SYNC_MASK,
			  AR_INTR_SYNC_DEFAULT);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_enable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);