/******************************************************************************
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
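
/*
 * The scheduler's start sequence number (SSN) is stored immediately
 * after the per-frame agg_tx_status entries in the Tx response, hence
 * the pointer arithmetic past tx_resp->status below.
 */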
static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
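
/* Translate a uCode Tx response into mac80211 tx_info status fields. */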
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwl5000_tx_resp *tx_resp,
				 int txq_id, bool is_agg)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
			   "0x%x retries %d\n",
			   txq_id,
			   iwl_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
}
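
/*
 * iwlagn_tx_status_reply_tx - reconcile an aggregation Tx response
 *
 * Builds the bitmap of frames still pending a block-ack, tracking the
 * lowest sequence number ("start") within the 64-frame window as it goes.
 */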
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				     struct iwl_ht_agg *agg,
				     struct iwl5000_tx_resp *tx_resp,
				     int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_hdr *hdr = NULL;
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);
		iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(
					priv->txq[txq_id].txb[idx].skb),
				     tx_resp, txq_id, true);
		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;

		/*
		 * Start is the lowest frame sent. It may not be the first
		 * frame in the batch; we figure this out dynamically during
		 * the following loop.
		 */
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc), sc);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/*
			 * sh -> how many frames ahead of the starting frame is
			 * the current one?
			 *
			 * Note that all frames sent in the batch must be in a
			 * 64-frame window, so this number should be in [0,63].
			 * If outside of this window, then we've found a new
			 * "first" frame in the batch and need to change start.
			 */
			sh = idx - start;

			/*
			 * If >= 64, out of window. start must be at the front
			 * of the circular buffer, idx must be near the end of
			 * the buffer, and idx is the new "first" frame. Shift
			 * the indices around.
			 */
			if (sh >= 64) {
				/* Shift bitmap by start - idx, wrapped */
				sh = 0x100 - idx + start;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				sh = 0;
				start = idx;
			/*
			 * If <= -64 then wraps the 256-pkt circular buffer
			 * (e.g., start = 255 and idx = 0, sh should be 1)
			 */
			} else if (sh <= -64) {
				sh = 0x100 - start + idx;
			/*
			 * If < 0 but > -64, out of window. idx is before start
			 * but not wrapped. Shift the indices around.
			 */
			} else if (sh < 0) {
				/* Shift by how far start is ahead of idx */
				sh = start - idx;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				start = idx;
				sh = 0;
			}

			/* Sequence number start + sh was sent in this batch */
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		/*
		 * Store the bitmap and possibly the new start, if we wrapped
		 * the buffer above
		 */
		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
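
/*
 * A single-frame failure with TX_STATUS_FAIL_RFKILL_FLUSH means the
 * uCode flushed the frame because of RF-kill; schedule the Tx-flush
 * work item to drain the remaining frames.
 */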
void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}
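
/*
 * Handle the REPLY_TX response: update mac80211 status, reclaim Tx-queue
 * entries, and wake the corresponding queues when space frees up.
 */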
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;
	unsigned long flags;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->sta_lock, flags);
	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg;

		agg = &priv->stations[sta_id].tid[tid].agg;
		/*
		 * If the BT kill count is non-zero, we'll get this
		 * notification again.
		 */
		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
		    priv->cfg->advanced_bt_coexist) {
			IWL_WARN(priv, "receive reply tx with bt_kill\n");
		}
		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn, index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
				if (agg->state == IWL_AGG_OFF)
					iwl_wake_queue(priv, txq_id);
				else
					iwl_wake_queue(priv, txq->swq_id);
			}
		}
	} else {
		BUG_ON(txq_id != txq->swq_id);
		iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
			iwl_wake_queue(priv, txq_id);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
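
/* Register the agn-specific Rx notification handlers. */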
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
					iwlagn_rx_calib_complete;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/* in agn, the tx power calibration is done in uCode */
	priv->disable_tx_power_cal = 1;
}
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
	       (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}
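
/*
 * Send the current user Tx power limit to the uCode; the command takes
 * half-dBm units, hence the doubling of the dBm limit below.
 */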
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For newer devices that use the enhanced/extended tx power
		 * table in EEPROM, the format is in half dBm. The driver needs
		 * to convert to dBm format before reporting to mac80211.
		 * Doing so may lose 1/2 dBm of resolution; the driver rounds
		 * up before reporting, but that can push the tx power 1/2 dBm
		 * over the regulatory limit. Check here: if
		 * "tx_power_user_lmt" is higher than the EEPROM value (in
		 * half-dBm format), lower the tx power based on EEPROM.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
				      sizeof(tx_power_cmd), &tx_power_cmd,
				      NULL);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from statistics (in Celsius) */
	priv->temperature =
		le32_to_cpu(priv->_agn.statistics.general.common.temperature);
	iwl_tt_handler(priv);
}
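
/* Read the version byte out of the EEPROM calibration block header. */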
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		__le16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;
}
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
			address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to byte */
	return (address & ADDRESS_MSK) + (offset << 1);
}
const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
				   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->eeprom_size);
	return &priv->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
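
/* Return all Rx buffers to rx_used and reset the queue indices. */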
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
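
/*
 * Program the FH Rx DMA channel: RBD ring address, status write-back
 * address, buffer size, and interrupt coalescing.
 */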
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
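
/* One-time NIC init: APM setup plus Rx/Tx queue allocation or reset. */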
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwlagn_rx_queue_reset(priv, rxq);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwlagn_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwlagn_txq_ctx_reset(priv);

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					     dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
/**
 * iwlagn_rx_allocate - move all used packets from rx_used to rx_free
 *
 * When a buffer is moved to rx_free, a page is allocated for the slot.
 *
 * iwlagn_rx_replenish() calls this and then restocks the Rx queue via
 * iwlagn_rx_queue_restock(); it runs as a scheduled work item (except
 * during initialization).
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and if an SKB is set to
 * non-NULL it is unmapped and freed.
 */
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
int iwlagn_rxq_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
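
/* Convert a uCode rate_n_flags word into a mac80211 rate-table index
 * (or, for HT frames, the raw MCS number). */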
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
				   struct iwl_rx_phy_res *rx_resp)
{
	return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
					   struct ieee80211_hdr *hdr,
					   u16 len,
					   u32 ampdu_status,
					   struct iwl_rx_mem_buffer *rxb,
					   struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
/* Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 *	REPLY_RX: physical layer info is in this buffer
	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!priv->_agn.last_phy_res_valid) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &priv->_agn.last_phy_res;
		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwlagn_translate_rx_status(priv,
				le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
				phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
				le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.rate_idx =
		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_TSFT;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);

	iwl_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
		rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				       rxb, &rx_status);
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwlagn_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->_agn.last_phy_res_valid = true;
	memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
	       sizeof(struct iwl_rx_phy_res));
}
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
					   struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added = 0;
	u16 channel = 0;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return added;
	}

	active_dwell = iwl_get_active_dwell_time(priv, band, 0);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	channel = iwl_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		added++;
	} else
		IWL_ERR(priv, "no valid channel found\n");
	return added;
}
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_get_channel_info(priv, band, channel);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		if (!is_active || is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct ieee80211_conf *conf = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;

	if (vif)
		ctx = iwl_rxon_ctx_from_vif(vif);

	conf = ieee80211_get_hw_conf(priv->hw);

	cancel_delayed_work(&priv->scan_check);

	if (!iwl_is_ready(priv)) {
		IWL_WARN(priv, "request scan called when driver not ready.\n");
		goto done;
	}

	/* Make sure the scan wasn't canceled before this queued work
	 * was given the chance to run... */
	if (!test_bit(STATUS_SCANNING, &priv->status))
		goto done;

	/* This should never be called or scheduled if there is currently
	 * a scan active in the hardware. */
	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
			       "Ignoring second request.\n");
		goto done;
	}

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
		goto done;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
		goto done;
	}

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
		goto done;
	}

	if (!test_bit(STATUS_READY, &priv->status)) {
		IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
		goto done;
	}

	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			goto done;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;
		unsigned long flags;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->is_internal_short_scan)
			interval = 0;
		else
			interval = vif->bss_conf.beacon_int;
		spin_unlock_irqrestore(&priv->lock, flags);

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
	} else if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		/*
		 * Internal scans are passive, so we can indiscriminately set
		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
		 */
		if (priv->cfg->advanced_bt_coexist)
			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
		scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		/*
		 * If active scanning is requested but a certain channel is
		 * marked passive, we can do active scanning if we detect
		 * transmissions.
		 *
		 * There is an issue with some firmware versions that triggers
		 * a sysassert on a "good CRC threshold" of zero (== disabled),
		 * on a radar channel even though this means that we should NOT
		 * send probes.
		 *
		 * The "good CRC threshold" is the number of frames that we
		 * need to receive during our dwell time on a channel before
		 * sending out probes -- setting this to a huge value will
		 * mean we never reach it, but at the same time work around
		 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
		 * here instead of IWL_GOOD_CRC_TH_DISABLED.
		 */
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_NEVER;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band count\n");
		goto done;
	}

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (priv->cfg->scan_tx_antennas[band])
		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];

	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		scan_tx_antennas =
			first_antenna(priv->cfg->scan_tx_antennas[band]);
	}

	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		rx_ant = first_antenna(rx_ant);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	if (!priv->is_internal_short_scan) {
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	} else {
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwl_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	if (priv->is_internal_short_scan) {
		scan->channel_count =
			iwl_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	} else {
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
				is_active, n_probes,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	}
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		goto done;
	}

	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);

	if (priv->cfg->ops->hcmd->set_pan_params &&
	    priv->cfg->ops->hcmd->set_pan_params(priv))
		goto done;

	if (iwl_send_cmd_sync(priv, &cmd))
		goto done;

	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return;

 done:
	/* Cannot perform scan. Make sure we clear scanning
	 * bits from status so next scan request can be performed.
	 * If we don't clear scanning status bit here all next scan
	 * will fail
	 */
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCANNING, &priv->status);
	/* inform mac80211 scan aborted */
	queue_work(priv->workqueue, &priv->scan_completed);
}
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwl_add_bssid_station(priv, vif_priv->ctx,
					     vif->bss_conf.bssid, true,
					     &vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}
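
/* Account for reclaimed TFDs on a station/TID, clamping at zero. */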
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
#define IWL_FLUSH_WAIT_MS	2000

int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		if (cnt == priv->cmd_queue)
			continue;
		txq = &priv->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(priv, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
#define IWL_TX_QUEUE_MSK	0xfffff

/**
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * pre-requirements:
 *  1. acquire mutex before calling
 *  2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	struct iwl_txfifo_flush_cmd flush_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TXFIFO_FLUSH,
		.len = sizeof(struct iwl_txfifo_flush_cmd),
		.flags = CMD_SYNC,
		.data = &flush_cmd,
	};

	might_sleep();

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
				 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
	if (priv->cfg->sku & IWL_SKU_N)
		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;

	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
		       flush_cmd.fifo_control);
	flush_cmd.flush_control = cpu_to_le16(flush_control);

	return iwl_send_cmd(priv, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}
/*
 * Macros to access the lookup table.
 *
 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
 *
 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
 *
 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
 * one after another in 32-bit registers, and "registers" 0 through 7 contain
 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
 *
 * These macros encode that format.
 */
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
		  wifi_txrx, wifi_sh_ant_req) \
	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))

#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
				   bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
				   wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
				  wifi_req, wifi_prio, wifi_txrx, \
				  wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))

#define LUT_WLAN_KILL_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			     wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

#define LUT_ANT_SWITCH_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			      wifi_req, wifi_prio, wifi_txrx, \
			      wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
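
/*
 * Worked example (illustrative only): LUT_VALUE(1, 0, 1, 0, 0, 0, 0)
 * packs bt3_prio into bit 0 and bt_rf_act into bit 2, giving 0x05; the
 * WLAN_ACTIVE lookup for that value then tests bit 5 of lut[8].
 */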
static const __le32 iwlagn_def_3w_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0004000),
};
static const __le32 iwlagn_concurrent_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};
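
/* Build and send the advanced BT coexistence configuration command. */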
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwlagn_bt_cmd bt_cmd = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(bt_cmd.bt3_lookup_table));

	bt_cmd.prio_boost = priv->cfg->bt_prio_boost;
	bt_cmd.kill_ack_mask = priv->kill_ack_mask;
	bt_cmd.kill_cts_mask = priv->kill_cts_mask;
	bt_cmd.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		bt_cmd.flags = 0;
	} else {
		bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
		if (priv->bt_ch_announce)
			bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
	}
	if (priv->bt_full_concurrent)
		memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
		       sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
		       sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
		       bt_cmd.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
		IWL_ERR(priv, "failed to send BT Coex Config\n");

	/*
	 * When we are doing a restart, we need to also reconfigure BT
	 * SCO to the device. If not doing a restart, bt_sco_active
	 * will always be false, so there's no need to have an extra
	 * variable to check for it.
	 */
	if (priv->bt_sco_active) {
		struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };

		if (priv->bt_sco_active)
			sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
		if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
				     sizeof(sco_cmd), &sco_cmd))
			IWL_ERR(priv, "failed to send BT SCO command\n");
	}
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	if (priv->cfg->ops->lib->update_chain_flags)
		priv->cfg->ops->lib->update_chain_flags(priv);

	if (smps_request != -1) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	mutex_unlock(&priv->mutex);
}
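
/* Dump the decoded BT UART message fields to the debug log. */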
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
			"0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
				    struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_ack_msk;
	__le32 bt_kill_ack_msg[2] = {
		cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };

	kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
			  BT_UART_MSG_FRAME3SNIFF_MSK |
			  BT_UART_MSG_FRAME3SCOESCO_MSK) &
			 uart_msg->frame3) == 0) ? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk];
		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}
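
/*
 * Handle a BT coex profile notification from the uCode: track BT status
 * and traffic load, mirror the SCO state, and update the kill-ack mask.
 */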
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
	u8 last_traffic_load;

	IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
	IWL_DEBUG_NOTIF(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_NOTIF(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_NOTIF(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	last_traffic_load = priv->notif_bt_traffic_load;
	priv->notif_bt_traffic_load = coex->bt_traffic_load;
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    last_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
		if (priv->bt_sco_active !=
		    (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
			priv->bt_sco_active = uart_msg->frame3 &
				BT_UART_MSG_FRAME3SCOESCO_MSK;
			if (priv->bt_sco_active)
				sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
			iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
				       sizeof(sco_cmd), &sco_cmd, NULL);
		}
	}

	iwlagn_set_kill_ack_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost */

	spin_lock_irqsave(&priv->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->lock, flags);
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	iwlagn_rx_handler_setup(priv);
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	iwlagn_setup_deferred_work(priv);

	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}