1 /******************************************************************************
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <net/mac80211.h>
35 #include "iwl-eeprom.h"
36 #include "iwl-dev.h" /* FIXME: remove */
37 #include "iwl-debug.h"
40 #include "iwl-power.h"
42 #include "iwl-helpers.h"
45 MODULE_DESCRIPTION("iwl core");
46 MODULE_VERSION(IWLWIFI_VERSION
);
47 MODULE_AUTHOR(DRV_COPYRIGHT
" " DRV_AUTHOR
);
48 MODULE_LICENSE("GPL");
/*
 * set bt_coex_active to true, uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * set bt_coex_active to false, uCode will ignore the BT activity and
 * perform the normal operation
 *
 * User might experience transmit issue on some platform due to WiFi/BT
 * co-exist problem. The possible behaviors are:
 *   Able to scan and finding all the available AP
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by set
 * "bt_coex_active" module parameter to "false"
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
66 static bool bt_coex_active
= true;
67 module_param(bt_coex_active
, bool, S_IRUGO
);
68 MODULE_PARM_DESC(bt_coex_active
, "enable wifi/bluetooth co-exist");
70 static struct iwl_wimax_coex_event_entry cu_priorities
[COEX_NUM_OF_EVENTS
] = {
71 {COEX_CU_UNASSOC_IDLE_RP
, COEX_CU_UNASSOC_IDLE_WP
,
72 0, COEX_UNASSOC_IDLE_FLAGS
},
73 {COEX_CU_UNASSOC_MANUAL_SCAN_RP
, COEX_CU_UNASSOC_MANUAL_SCAN_WP
,
74 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS
},
75 {COEX_CU_UNASSOC_AUTO_SCAN_RP
, COEX_CU_UNASSOC_AUTO_SCAN_WP
,
76 0, COEX_UNASSOC_AUTO_SCAN_FLAGS
},
77 {COEX_CU_CALIBRATION_RP
, COEX_CU_CALIBRATION_WP
,
78 0, COEX_CALIBRATION_FLAGS
},
79 {COEX_CU_PERIODIC_CALIBRATION_RP
, COEX_CU_PERIODIC_CALIBRATION_WP
,
80 0, COEX_PERIODIC_CALIBRATION_FLAGS
},
81 {COEX_CU_CONNECTION_ESTAB_RP
, COEX_CU_CONNECTION_ESTAB_WP
,
82 0, COEX_CONNECTION_ESTAB_FLAGS
},
83 {COEX_CU_ASSOCIATED_IDLE_RP
, COEX_CU_ASSOCIATED_IDLE_WP
,
84 0, COEX_ASSOCIATED_IDLE_FLAGS
},
85 {COEX_CU_ASSOC_MANUAL_SCAN_RP
, COEX_CU_ASSOC_MANUAL_SCAN_WP
,
86 0, COEX_ASSOC_MANUAL_SCAN_FLAGS
},
87 {COEX_CU_ASSOC_AUTO_SCAN_RP
, COEX_CU_ASSOC_AUTO_SCAN_WP
,
88 0, COEX_ASSOC_AUTO_SCAN_FLAGS
},
89 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP
, COEX_CU_ASSOC_ACTIVE_LEVEL_WP
,
90 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS
},
91 {COEX_CU_RF_ON_RP
, COEX_CU_RF_ON_WP
, 0, COEX_CU_RF_ON_FLAGS
},
92 {COEX_CU_RF_OFF_RP
, COEX_CU_RF_OFF_WP
, 0, COEX_RF_OFF_FLAGS
},
93 {COEX_CU_STAND_ALONE_DEBUG_RP
, COEX_CU_STAND_ALONE_DEBUG_WP
,
94 0, COEX_STAND_ALONE_DEBUG_FLAGS
},
95 {COEX_CU_IPAN_ASSOC_LEVEL_RP
, COEX_CU_IPAN_ASSOC_LEVEL_WP
,
96 0, COEX_IPAN_ASSOC_LEVEL_FLAGS
},
97 {COEX_CU_RSRVD1_RP
, COEX_CU_RSRVD1_WP
, 0, COEX_RSRVD1_FLAGS
},
98 {COEX_CU_RSRVD2_RP
, COEX_CU_RSRVD2_WP
, 0, COEX_RSRVD2_FLAGS
}
/*
 * Expand one row of the iwl_rates table: PLCP codes (legacy/SISO/MIMO2/
 * MIMO3), IEEE rate value, and previous/next rate indices for the
 * plain, rate-scale, and TGG neighbour chains.
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_MIMO3_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
115 EXPORT_SYMBOL(iwl_debug_level
);
/*
 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 */
125 const struct iwl_rate_info iwl_rates
[IWL_RATE_COUNT
] = {
126 IWL_DECLARE_RATE_INFO(1, INV
, INV
, 2, INV
, 2, INV
, 2), /* 1mbps */
127 IWL_DECLARE_RATE_INFO(2, INV
, 1, 5, 1, 5, 1, 5), /* 2mbps */
128 IWL_DECLARE_RATE_INFO(5, INV
, 2, 6, 2, 11, 2, 11), /*5.5mbps */
129 IWL_DECLARE_RATE_INFO(11, INV
, 9, 12, 9, 12, 5, 18), /* 11mbps */
130 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
131 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
132 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
133 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
134 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
135 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
136 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
137 IWL_DECLARE_RATE_INFO(54, 54, 48, INV
, 48, INV
, 48, INV
),/* 54mbps */
138 IWL_DECLARE_RATE_INFO(60, 60, 48, INV
, 48, INV
, 48, INV
),/* 60mbps */
139 /* FIXME:RS: ^^ should be INV (legacy) */
141 EXPORT_SYMBOL(iwl_rates
);
143 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags
)
148 if (rate_n_flags
& RATE_MCS_HT_MSK
) {
149 idx
= (rate_n_flags
& 0xff);
151 if (idx
>= IWL_RATE_MIMO3_6M_PLCP
)
152 idx
= idx
- IWL_RATE_MIMO3_6M_PLCP
;
153 else if (idx
>= IWL_RATE_MIMO2_6M_PLCP
)
154 idx
= idx
- IWL_RATE_MIMO2_6M_PLCP
;
156 idx
+= IWL_FIRST_OFDM_RATE
;
157 /* skip 9M not supported in ht*/
158 if (idx
>= IWL_RATE_9M_INDEX
)
160 if ((idx
>= IWL_FIRST_OFDM_RATE
) && (idx
<= IWL_LAST_OFDM_RATE
))
163 /* legacy rate format, search for match in table */
165 for (idx
= 0; idx
< ARRAY_SIZE(iwl_rates
); idx
++)
166 if (iwl_rates
[idx
].plcp
== (rate_n_flags
& 0xFF))
172 EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx
);
174 u8
iwl_toggle_tx_ant(struct iwl_priv
*priv
, u8 ant
)
178 for (i
= 0; i
< RATE_ANT_NUM
- 1; i
++) {
179 ind
= (ind
+ 1) < RATE_ANT_NUM
? ind
+ 1 : 0;
180 if (priv
->hw_params
.valid_tx_ant
& BIT(ind
))
185 EXPORT_SYMBOL(iwl_toggle_tx_ant
);
187 const u8 iwl_bcast_addr
[ETH_ALEN
] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
188 EXPORT_SYMBOL(iwl_bcast_addr
);
191 /* This function both allocates and initializes hw and priv. */
192 struct ieee80211_hw
*iwl_alloc_all(struct iwl_cfg
*cfg
,
193 struct ieee80211_ops
*hw_ops
)
195 struct iwl_priv
*priv
;
197 /* mac80211 allocates memory for this device instance, including
198 * space for this driver's private structure */
199 struct ieee80211_hw
*hw
=
200 ieee80211_alloc_hw(sizeof(struct iwl_priv
), hw_ops
);
202 printk(KERN_ERR
"%s: Can not allocate network device\n",
213 EXPORT_SYMBOL(iwl_alloc_all
);
215 void iwl_hw_detect(struct iwl_priv
*priv
)
217 priv
->hw_rev
= _iwl_read32(priv
, CSR_HW_REV
);
218 priv
->hw_wa_rev
= _iwl_read32(priv
, CSR_HW_REV_WA_REG
);
219 pci_read_config_byte(priv
->pci_dev
, PCI_REVISION_ID
, &priv
->rev_id
);
221 EXPORT_SYMBOL(iwl_hw_detect
);
226 static void iwl_update_qos(struct iwl_priv
*priv
)
228 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
231 priv
->qos_data
.def_qos_parm
.qos_flags
= 0;
233 if (priv
->qos_data
.qos_active
)
234 priv
->qos_data
.def_qos_parm
.qos_flags
|=
235 QOS_PARAM_FLG_UPDATE_EDCA_MSK
;
237 if (priv
->current_ht_config
.is_ht
)
238 priv
->qos_data
.def_qos_parm
.qos_flags
|= QOS_PARAM_FLG_TGN_MSK
;
240 IWL_DEBUG_QOS(priv
, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
241 priv
->qos_data
.qos_active
,
242 priv
->qos_data
.def_qos_parm
.qos_flags
);
244 iwl_send_cmd_pdu_async(priv
, REPLY_QOS_PARAM
,
245 sizeof(struct iwl_qosparam_cmd
),
246 &priv
->qos_data
.def_qos_parm
, NULL
);
249 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
250 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
251 static void iwlcore_init_ht_hw_capab(const struct iwl_priv
*priv
,
252 struct ieee80211_sta_ht_cap
*ht_info
,
253 enum ieee80211_band band
)
255 u16 max_bit_rate
= 0;
256 u8 rx_chains_num
= priv
->hw_params
.rx_chains_num
;
257 u8 tx_chains_num
= priv
->hw_params
.tx_chains_num
;
260 memset(&ht_info
->mcs
, 0, sizeof(ht_info
->mcs
));
262 ht_info
->ht_supported
= true;
264 if (priv
->cfg
->ht_greenfield_support
)
265 ht_info
->cap
|= IEEE80211_HT_CAP_GRN_FLD
;
266 ht_info
->cap
|= IEEE80211_HT_CAP_SGI_20
;
267 max_bit_rate
= MAX_BIT_RATE_20_MHZ
;
268 if (priv
->hw_params
.ht40_channel
& BIT(band
)) {
269 ht_info
->cap
|= IEEE80211_HT_CAP_SUP_WIDTH_20_40
;
270 ht_info
->cap
|= IEEE80211_HT_CAP_SGI_40
;
271 ht_info
->mcs
.rx_mask
[4] = 0x01;
272 max_bit_rate
= MAX_BIT_RATE_40_MHZ
;
275 if (priv
->cfg
->mod_params
->amsdu_size_8K
)
276 ht_info
->cap
|= IEEE80211_HT_CAP_MAX_AMSDU
;
278 ht_info
->ampdu_factor
= CFG_HT_RX_AMPDU_FACTOR_DEF
;
279 ht_info
->ampdu_density
= CFG_HT_MPDU_DENSITY_DEF
;
281 ht_info
->mcs
.rx_mask
[0] = 0xFF;
282 if (rx_chains_num
>= 2)
283 ht_info
->mcs
.rx_mask
[1] = 0xFF;
284 if (rx_chains_num
>= 3)
285 ht_info
->mcs
.rx_mask
[2] = 0xFF;
287 /* Highest supported Rx data rate */
288 max_bit_rate
*= rx_chains_num
;
289 WARN_ON(max_bit_rate
& ~IEEE80211_HT_MCS_RX_HIGHEST_MASK
);
290 ht_info
->mcs
.rx_highest
= cpu_to_le16(max_bit_rate
);
292 /* Tx MCS capabilities */
293 ht_info
->mcs
.tx_params
= IEEE80211_HT_MCS_TX_DEFINED
;
294 if (tx_chains_num
!= rx_chains_num
) {
295 ht_info
->mcs
.tx_params
|= IEEE80211_HT_MCS_TX_RX_DIFF
;
296 ht_info
->mcs
.tx_params
|= ((tx_chains_num
- 1) <<
297 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT
);
302 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
304 int iwlcore_init_geos(struct iwl_priv
*priv
)
306 struct iwl_channel_info
*ch
;
307 struct ieee80211_supported_band
*sband
;
308 struct ieee80211_channel
*channels
;
309 struct ieee80211_channel
*geo_ch
;
310 struct ieee80211_rate
*rates
;
313 if (priv
->bands
[IEEE80211_BAND_2GHZ
].n_bitrates
||
314 priv
->bands
[IEEE80211_BAND_5GHZ
].n_bitrates
) {
315 IWL_DEBUG_INFO(priv
, "Geography modes already initialized.\n");
316 set_bit(STATUS_GEO_CONFIGURED
, &priv
->status
);
320 channels
= kzalloc(sizeof(struct ieee80211_channel
) *
321 priv
->channel_count
, GFP_KERNEL
);
325 rates
= kzalloc((sizeof(struct ieee80211_rate
) * IWL_RATE_COUNT_LEGACY
),
332 /* 5.2GHz channels start after the 2.4GHz channels */
333 sband
= &priv
->bands
[IEEE80211_BAND_5GHZ
];
334 sband
->channels
= &channels
[ARRAY_SIZE(iwl_eeprom_band_1
)];
336 sband
->bitrates
= &rates
[IWL_FIRST_OFDM_RATE
];
337 sband
->n_bitrates
= IWL_RATE_COUNT_LEGACY
- IWL_FIRST_OFDM_RATE
;
339 if (priv
->cfg
->sku
& IWL_SKU_N
)
340 iwlcore_init_ht_hw_capab(priv
, &sband
->ht_cap
,
341 IEEE80211_BAND_5GHZ
);
343 sband
= &priv
->bands
[IEEE80211_BAND_2GHZ
];
344 sband
->channels
= channels
;
346 sband
->bitrates
= rates
;
347 sband
->n_bitrates
= IWL_RATE_COUNT_LEGACY
;
349 if (priv
->cfg
->sku
& IWL_SKU_N
)
350 iwlcore_init_ht_hw_capab(priv
, &sband
->ht_cap
,
351 IEEE80211_BAND_2GHZ
);
353 priv
->ieee_channels
= channels
;
354 priv
->ieee_rates
= rates
;
356 for (i
= 0; i
< priv
->channel_count
; i
++) {
357 ch
= &priv
->channel_info
[i
];
359 /* FIXME: might be removed if scan is OK */
360 if (!is_channel_valid(ch
))
363 if (is_channel_a_band(ch
))
364 sband
= &priv
->bands
[IEEE80211_BAND_5GHZ
];
366 sband
= &priv
->bands
[IEEE80211_BAND_2GHZ
];
368 geo_ch
= &sband
->channels
[sband
->n_channels
++];
370 geo_ch
->center_freq
=
371 ieee80211_channel_to_frequency(ch
->channel
);
372 geo_ch
->max_power
= ch
->max_power_avg
;
373 geo_ch
->max_antenna_gain
= 0xff;
374 geo_ch
->hw_value
= ch
->channel
;
376 if (is_channel_valid(ch
)) {
377 if (!(ch
->flags
& EEPROM_CHANNEL_IBSS
))
378 geo_ch
->flags
|= IEEE80211_CHAN_NO_IBSS
;
380 if (!(ch
->flags
& EEPROM_CHANNEL_ACTIVE
))
381 geo_ch
->flags
|= IEEE80211_CHAN_PASSIVE_SCAN
;
383 if (ch
->flags
& EEPROM_CHANNEL_RADAR
)
384 geo_ch
->flags
|= IEEE80211_CHAN_RADAR
;
386 geo_ch
->flags
|= ch
->ht40_extension_channel
;
388 if (ch
->max_power_avg
> priv
->tx_power_device_lmt
)
389 priv
->tx_power_device_lmt
= ch
->max_power_avg
;
391 geo_ch
->flags
|= IEEE80211_CHAN_DISABLED
;
394 IWL_DEBUG_INFO(priv
, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
395 ch
->channel
, geo_ch
->center_freq
,
396 is_channel_a_band(ch
) ? "5.2" : "2.4",
397 geo_ch
->flags
& IEEE80211_CHAN_DISABLED
?
398 "restricted" : "valid",
402 if ((priv
->bands
[IEEE80211_BAND_5GHZ
].n_channels
== 0) &&
403 priv
->cfg
->sku
& IWL_SKU_A
) {
404 IWL_INFO(priv
, "Incorrectly detected BG card as ABG. "
405 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
406 priv
->pci_dev
->device
,
407 priv
->pci_dev
->subsystem_device
);
408 priv
->cfg
->sku
&= ~IWL_SKU_A
;
411 IWL_INFO(priv
, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
412 priv
->bands
[IEEE80211_BAND_2GHZ
].n_channels
,
413 priv
->bands
[IEEE80211_BAND_5GHZ
].n_channels
);
415 set_bit(STATUS_GEO_CONFIGURED
, &priv
->status
);
419 EXPORT_SYMBOL(iwlcore_init_geos
);
422 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
424 void iwlcore_free_geos(struct iwl_priv
*priv
)
426 kfree(priv
->ieee_channels
);
427 kfree(priv
->ieee_rates
);
428 clear_bit(STATUS_GEO_CONFIGURED
, &priv
->status
);
430 EXPORT_SYMBOL(iwlcore_free_geos
);
433 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
436 void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info
*info
,
439 if (info
->control
.rates
[0].flags
& IEEE80211_TX_RC_USE_RTS_CTS
) {
440 *tx_flags
|= TX_CMD_FLG_RTS_MSK
;
441 *tx_flags
&= ~TX_CMD_FLG_CTS_MSK
;
442 } else if (info
->control
.rates
[0].flags
& IEEE80211_TX_RC_USE_CTS_PROTECT
) {
443 *tx_flags
&= ~TX_CMD_FLG_RTS_MSK
;
444 *tx_flags
|= TX_CMD_FLG_CTS_MSK
;
447 EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag
);
449 static bool is_single_rx_stream(struct iwl_priv
*priv
)
451 return priv
->current_ht_config
.smps
== IEEE80211_SMPS_STATIC
||
452 priv
->current_ht_config
.single_chain_sufficient
;
455 static u8
iwl_is_channel_extension(struct iwl_priv
*priv
,
456 enum ieee80211_band band
,
457 u16 channel
, u8 extension_chan_offset
)
459 const struct iwl_channel_info
*ch_info
;
461 ch_info
= iwl_get_channel_info(priv
, band
, channel
);
462 if (!is_channel_valid(ch_info
))
465 if (extension_chan_offset
== IEEE80211_HT_PARAM_CHA_SEC_ABOVE
)
466 return !(ch_info
->ht40_extension_channel
&
467 IEEE80211_CHAN_NO_HT40PLUS
);
468 else if (extension_chan_offset
== IEEE80211_HT_PARAM_CHA_SEC_BELOW
)
469 return !(ch_info
->ht40_extension_channel
&
470 IEEE80211_CHAN_NO_HT40MINUS
);
475 u8
iwl_is_ht40_tx_allowed(struct iwl_priv
*priv
,
476 struct ieee80211_sta_ht_cap
*sta_ht_inf
)
478 struct iwl_ht_config
*ht_conf
= &priv
->current_ht_config
;
480 if (!ht_conf
->is_ht
|| !ht_conf
->is_40mhz
)
483 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
484 * the bit will not set if it is pure 40MHz case
487 if (!sta_ht_inf
->ht_supported
)
490 #ifdef CONFIG_IWLWIFI_DEBUG
491 if (priv
->disable_ht40
)
494 return iwl_is_channel_extension(priv
, priv
->band
,
495 le16_to_cpu(priv
->staging_rxon
.channel
),
496 ht_conf
->extension_chan_offset
);
498 EXPORT_SYMBOL(iwl_is_ht40_tx_allowed
);
500 static u16
iwl_adjust_beacon_interval(u16 beacon_val
, u16 max_beacon_val
)
503 u16 beacon_factor
= 0;
505 beacon_factor
= (beacon_val
+ max_beacon_val
) / max_beacon_val
;
506 new_val
= beacon_val
/ beacon_factor
;
509 new_val
= max_beacon_val
;
514 void iwl_setup_rxon_timing(struct iwl_priv
*priv
)
517 s32 interval_tm
, rem
;
519 struct ieee80211_conf
*conf
= NULL
;
522 conf
= ieee80211_get_hw_conf(priv
->hw
);
524 spin_lock_irqsave(&priv
->lock
, flags
);
525 priv
->rxon_timing
.timestamp
= cpu_to_le64(priv
->timestamp
);
526 priv
->rxon_timing
.listen_interval
= cpu_to_le16(conf
->listen_interval
);
528 if (priv
->iw_mode
== NL80211_IFTYPE_STATION
) {
529 beacon_int
= priv
->beacon_int
;
530 priv
->rxon_timing
.atim_window
= 0;
532 beacon_int
= priv
->vif
->bss_conf
.beacon_int
;
534 /* TODO: we need to get atim_window from upper stack
535 * for now we set to 0 */
536 priv
->rxon_timing
.atim_window
= 0;
539 beacon_int
= iwl_adjust_beacon_interval(beacon_int
,
540 priv
->hw_params
.max_beacon_itrvl
* 1024);
541 priv
->rxon_timing
.beacon_interval
= cpu_to_le16(beacon_int
);
543 tsf
= priv
->timestamp
; /* tsf is modifed by do_div: copy it */
544 interval_tm
= beacon_int
* 1024;
545 rem
= do_div(tsf
, interval_tm
);
546 priv
->rxon_timing
.beacon_init_val
= cpu_to_le32(interval_tm
- rem
);
548 spin_unlock_irqrestore(&priv
->lock
, flags
);
549 IWL_DEBUG_ASSOC(priv
,
550 "beacon interval %d beacon timer %d beacon tim %d\n",
551 le16_to_cpu(priv
->rxon_timing
.beacon_interval
),
552 le32_to_cpu(priv
->rxon_timing
.beacon_init_val
),
553 le16_to_cpu(priv
->rxon_timing
.atim_window
));
555 EXPORT_SYMBOL(iwl_setup_rxon_timing
);
557 void iwl_set_rxon_hwcrypto(struct iwl_priv
*priv
, int hw_decrypt
)
559 struct iwl_rxon_cmd
*rxon
= &priv
->staging_rxon
;
562 rxon
->filter_flags
&= ~RXON_FILTER_DIS_DECRYPT_MSK
;
564 rxon
->filter_flags
|= RXON_FILTER_DIS_DECRYPT_MSK
;
567 EXPORT_SYMBOL(iwl_set_rxon_hwcrypto
);
570 * iwl_check_rxon_cmd - validate RXON structure is valid
572 * NOTE: This is really only useful during development and can eventually
573 * be #ifdef'd out once the driver is stable and folks aren't actively
576 int iwl_check_rxon_cmd(struct iwl_priv
*priv
)
580 struct iwl_rxon_cmd
*rxon
= &priv
->staging_rxon
;
582 if (rxon
->flags
& RXON_FLG_BAND_24G_MSK
) {
583 error
|= le32_to_cpu(rxon
->flags
&
584 (RXON_FLG_TGJ_NARROW_BAND_MSK
|
585 RXON_FLG_RADAR_DETECT_MSK
));
587 IWL_WARN(priv
, "check 24G fields %d | %d\n",
590 error
|= (rxon
->flags
& RXON_FLG_SHORT_SLOT_MSK
) ?
591 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK
);
593 IWL_WARN(priv
, "check 52 fields %d | %d\n",
595 error
|= le32_to_cpu(rxon
->flags
& RXON_FLG_CCK_MSK
);
597 IWL_WARN(priv
, "check 52 CCK %d | %d\n",
600 error
|= (rxon
->node_addr
[0] | rxon
->bssid_addr
[0]) & 0x1;
602 IWL_WARN(priv
, "check mac addr %d | %d\n", counter
++, error
);
604 /* make sure basic rates 6Mbps and 1Mbps are supported */
605 error
|= (((rxon
->ofdm_basic_rates
& IWL_RATE_6M_MASK
) == 0) &&
606 ((rxon
->cck_basic_rates
& IWL_RATE_1M_MASK
) == 0));
608 IWL_WARN(priv
, "check basic rate %d | %d\n", counter
++, error
);
610 error
|= (le16_to_cpu(rxon
->assoc_id
) > 2007);
612 IWL_WARN(priv
, "check assoc id %d | %d\n", counter
++, error
);
614 error
|= ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
))
615 == (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
));
617 IWL_WARN(priv
, "check CCK and short slot %d | %d\n",
620 error
|= ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
))
621 == (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
));
623 IWL_WARN(priv
, "check CCK & auto detect %d | %d\n",
626 error
|= ((rxon
->flags
& (RXON_FLG_AUTO_DETECT_MSK
|
627 RXON_FLG_TGG_PROTECT_MSK
)) == RXON_FLG_TGG_PROTECT_MSK
);
629 IWL_WARN(priv
, "check TGG and auto detect %d | %d\n",
633 IWL_WARN(priv
, "Tuning to channel %d\n",
634 le16_to_cpu(rxon
->channel
));
637 IWL_ERR(priv
, "Not a valid iwl_rxon_assoc_cmd field values\n");
642 EXPORT_SYMBOL(iwl_check_rxon_cmd
);
645 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
646 * @priv: staging_rxon is compared to active_rxon
648 * If the RXON structure is changing enough to require a new tune,
649 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
650 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
652 int iwl_full_rxon_required(struct iwl_priv
*priv
)
655 /* These items are only settable from the full RXON command */
656 if (!(iwl_is_associated(priv
)) ||
657 compare_ether_addr(priv
->staging_rxon
.bssid_addr
,
658 priv
->active_rxon
.bssid_addr
) ||
659 compare_ether_addr(priv
->staging_rxon
.node_addr
,
660 priv
->active_rxon
.node_addr
) ||
661 compare_ether_addr(priv
->staging_rxon
.wlap_bssid_addr
,
662 priv
->active_rxon
.wlap_bssid_addr
) ||
663 (priv
->staging_rxon
.dev_type
!= priv
->active_rxon
.dev_type
) ||
664 (priv
->staging_rxon
.channel
!= priv
->active_rxon
.channel
) ||
665 (priv
->staging_rxon
.air_propagation
!=
666 priv
->active_rxon
.air_propagation
) ||
667 (priv
->staging_rxon
.ofdm_ht_single_stream_basic_rates
!=
668 priv
->active_rxon
.ofdm_ht_single_stream_basic_rates
) ||
669 (priv
->staging_rxon
.ofdm_ht_dual_stream_basic_rates
!=
670 priv
->active_rxon
.ofdm_ht_dual_stream_basic_rates
) ||
671 (priv
->staging_rxon
.ofdm_ht_triple_stream_basic_rates
!=
672 priv
->active_rxon
.ofdm_ht_triple_stream_basic_rates
) ||
673 (priv
->staging_rxon
.assoc_id
!= priv
->active_rxon
.assoc_id
))
676 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
677 * be updated with the RXON_ASSOC command -- however only some
678 * flag transitions are allowed using RXON_ASSOC */
680 /* Check if we are not switching bands */
681 if ((priv
->staging_rxon
.flags
& RXON_FLG_BAND_24G_MSK
) !=
682 (priv
->active_rxon
.flags
& RXON_FLG_BAND_24G_MSK
))
685 /* Check if we are switching association toggle */
686 if ((priv
->staging_rxon
.filter_flags
& RXON_FILTER_ASSOC_MSK
) !=
687 (priv
->active_rxon
.filter_flags
& RXON_FILTER_ASSOC_MSK
))
692 EXPORT_SYMBOL(iwl_full_rxon_required
);
694 u8
iwl_rate_get_lowest_plcp(struct iwl_priv
*priv
)
697 * Assign the lowest rate -- should really get this from
698 * the beacon skb from mac80211.
700 if (priv
->staging_rxon
.flags
& RXON_FLG_BAND_24G_MSK
)
701 return IWL_RATE_1M_PLCP
;
703 return IWL_RATE_6M_PLCP
;
705 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp
);
707 void iwl_set_rxon_ht(struct iwl_priv
*priv
, struct iwl_ht_config
*ht_conf
)
709 struct iwl_rxon_cmd
*rxon
= &priv
->staging_rxon
;
711 if (!ht_conf
->is_ht
) {
712 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
713 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
|
714 RXON_FLG_HT40_PROT_MSK
|
715 RXON_FLG_HT_PROT_MSK
);
719 /* FIXME: if the definition of ht_protection changed, the "translation"
720 * will be needed for rxon->flags
722 rxon
->flags
|= cpu_to_le32(ht_conf
->ht_protection
<< RXON_FLG_HT_OPERATING_MODE_POS
);
724 /* Set up channel bandwidth:
725 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
726 /* clear the HT channel mode before set the mode */
727 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
728 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
729 if (iwl_is_ht40_tx_allowed(priv
, NULL
)) {
731 if (ht_conf
->ht_protection
== IEEE80211_HT_OP_MODE_PROTECTION_20MHZ
) {
732 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_PURE_40
;
733 /* Note: control channel is opposite of extension channel */
734 switch (ht_conf
->extension_chan_offset
) {
735 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
736 rxon
->flags
&= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
738 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
739 rxon
->flags
|= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
743 /* Note: control channel is opposite of extension channel */
744 switch (ht_conf
->extension_chan_offset
) {
745 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
746 rxon
->flags
&= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
747 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
749 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
750 rxon
->flags
|= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
751 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
753 case IEEE80211_HT_PARAM_CHA_SEC_NONE
:
755 /* channel location only valid if in Mixed mode */
756 IWL_ERR(priv
, "invalid extension channel offset\n");
761 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_LEGACY
;
764 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
765 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
);
767 IWL_DEBUG_ASSOC(priv
, "rxon flags 0x%X operation mode :0x%X "
768 "extension channel offset 0x%x\n",
769 le32_to_cpu(rxon
->flags
), ht_conf
->ht_protection
,
770 ht_conf
->extension_chan_offset
);
773 EXPORT_SYMBOL(iwl_set_rxon_ht
);
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}
800 * When we are in power saving mode, unless device support spatial
801 * multiplexing power save, use the active count for rx chain count.
803 static int iwl_get_idle_rx_chain_count(struct iwl_priv
*priv
, int active_cnt
)
805 /* # Rx chains when idling, depending on SMPS mode */
806 switch (priv
->current_ht_config
.smps
) {
807 case IEEE80211_SMPS_STATIC
:
808 case IEEE80211_SMPS_DYNAMIC
:
809 return IWL_NUM_IDLE_CHAINS_SINGLE
;
810 case IEEE80211_SMPS_OFF
:
813 WARN(1, "invalid SMPS mode %d",
814 priv
->current_ht_config
.smps
);
820 static u8
iwl_count_chain_bitmap(u32 chain_bitmap
)
823 res
= (chain_bitmap
& BIT(0)) >> 0;
824 res
+= (chain_bitmap
& BIT(1)) >> 1;
825 res
+= (chain_bitmap
& BIT(2)) >> 2;
826 res
+= (chain_bitmap
& BIT(3)) >> 3;
831 * iwl_is_monitor_mode - Determine if interface in monitor mode
833 * priv->iw_mode is set in add_interface, but add_interface is
834 * never called for monitor mode. The only way mac80211 informs us about
835 * monitor mode is through configuring filters (call to configure_filter).
837 bool iwl_is_monitor_mode(struct iwl_priv
*priv
)
839 return !!(priv
->staging_rxon
.filter_flags
& RXON_FILTER_PROMISC_MSK
);
841 EXPORT_SYMBOL(iwl_is_monitor_mode
);
844 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
846 * Selects how many and which Rx receivers/antennas/chains to use.
847 * This should not be used for scan command ... it puts data in wrong place.
849 void iwl_set_rxon_chain(struct iwl_priv
*priv
)
851 bool is_single
= is_single_rx_stream(priv
);
852 bool is_cam
= !test_bit(STATUS_POWER_PMI
, &priv
->status
);
853 u8 idle_rx_cnt
, active_rx_cnt
, valid_rx_cnt
;
857 /* Tell uCode which antennas are actually connected.
858 * Before first association, we assume all antennas are connected.
859 * Just after first association, iwl_chain_noise_calibration()
860 * checks which antennas actually *are* connected. */
861 if (priv
->chain_noise_data
.active_chains
)
862 active_chains
= priv
->chain_noise_data
.active_chains
;
864 active_chains
= priv
->hw_params
.valid_rx_ant
;
866 rx_chain
= active_chains
<< RXON_RX_CHAIN_VALID_POS
;
868 /* How many receivers should we use? */
869 active_rx_cnt
= iwl_get_active_rx_chain_count(priv
);
870 idle_rx_cnt
= iwl_get_idle_rx_chain_count(priv
, active_rx_cnt
);
873 /* correct rx chain count according hw settings
874 * and chain noise calibration
876 valid_rx_cnt
= iwl_count_chain_bitmap(active_chains
);
877 if (valid_rx_cnt
< active_rx_cnt
)
878 active_rx_cnt
= valid_rx_cnt
;
880 if (valid_rx_cnt
< idle_rx_cnt
)
881 idle_rx_cnt
= valid_rx_cnt
;
883 rx_chain
|= active_rx_cnt
<< RXON_RX_CHAIN_MIMO_CNT_POS
;
884 rx_chain
|= idle_rx_cnt
<< RXON_RX_CHAIN_CNT_POS
;
886 /* copied from 'iwl_bg_request_scan()' */
887 /* Force use of chains B and C (0x6) for Rx
888 * Avoid A (0x1) for the device has off-channel reception on A-band.
889 * MIMO is not used here, but value is required */
890 if (iwl_is_monitor_mode(priv
) &&
891 !(priv
->staging_rxon
.flags
& RXON_FLG_BAND_24G_MSK
) &&
892 priv
->cfg
->off_channel_workaround
) {
893 rx_chain
= ANT_ABC
<< RXON_RX_CHAIN_VALID_POS
;
894 rx_chain
|= ANT_BC
<< RXON_RX_CHAIN_FORCE_SEL_POS
;
895 rx_chain
|= ANT_ABC
<< RXON_RX_CHAIN_FORCE_MIMO_SEL_POS
;
896 rx_chain
|= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS
;
899 priv
->staging_rxon
.rx_chain
= cpu_to_le16(rx_chain
);
901 if (!is_single
&& (active_rx_cnt
>= IWL_NUM_RX_CHAINS_SINGLE
) && is_cam
)
902 priv
->staging_rxon
.rx_chain
|= RXON_RX_CHAIN_MIMO_FORCE_MSK
;
904 priv
->staging_rxon
.rx_chain
&= ~RXON_RX_CHAIN_MIMO_FORCE_MSK
;
906 IWL_DEBUG_ASSOC(priv
, "rx_chain=0x%X active=%d idle=%d\n",
907 priv
->staging_rxon
.rx_chain
,
908 active_rx_cnt
, idle_rx_cnt
);
910 WARN_ON(active_rx_cnt
== 0 || idle_rx_cnt
== 0 ||
911 active_rx_cnt
< idle_rx_cnt
);
913 EXPORT_SYMBOL(iwl_set_rxon_chain
);
916 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
917 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
918 * @channel: Any channel valid for the requested phymode
920 * In addition to setting the staging RXON, priv->phymode is also set.
922 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
923 * in the staging RXON flag structure based on the phymode
925 int iwl_set_rxon_channel(struct iwl_priv
*priv
, struct ieee80211_channel
*ch
)
927 enum ieee80211_band band
= ch
->band
;
928 u16 channel
= ieee80211_frequency_to_channel(ch
->center_freq
);
930 if (!iwl_get_channel_info(priv
, band
, channel
)) {
931 IWL_DEBUG_INFO(priv
, "Could not set channel to %d [%d]\n",
936 if ((le16_to_cpu(priv
->staging_rxon
.channel
) == channel
) &&
937 (priv
->band
== band
))
940 priv
->staging_rxon
.channel
= cpu_to_le16(channel
);
941 if (band
== IEEE80211_BAND_5GHZ
)
942 priv
->staging_rxon
.flags
&= ~RXON_FLG_BAND_24G_MSK
;
944 priv
->staging_rxon
.flags
|= RXON_FLG_BAND_24G_MSK
;
948 IWL_DEBUG_INFO(priv
, "Staging channel set to %d [%d]\n", channel
, band
);
952 EXPORT_SYMBOL(iwl_set_rxon_channel
);
954 void iwl_set_flags_for_band(struct iwl_priv
*priv
,
955 enum ieee80211_band band
)
957 if (band
== IEEE80211_BAND_5GHZ
) {
958 priv
->staging_rxon
.flags
&=
959 ~(RXON_FLG_BAND_24G_MSK
| RXON_FLG_AUTO_DETECT_MSK
961 priv
->staging_rxon
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
963 /* Copied from iwl_post_associate() */
964 if (priv
->assoc_capability
& WLAN_CAPABILITY_SHORT_SLOT_TIME
)
965 priv
->staging_rxon
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
967 priv
->staging_rxon
.flags
&= ~RXON_FLG_SHORT_SLOT_MSK
;
969 if (priv
->iw_mode
== NL80211_IFTYPE_ADHOC
)
970 priv
->staging_rxon
.flags
&= ~RXON_FLG_SHORT_SLOT_MSK
;
972 priv
->staging_rxon
.flags
|= RXON_FLG_BAND_24G_MSK
;
973 priv
->staging_rxon
.flags
|= RXON_FLG_AUTO_DETECT_MSK
;
974 priv
->staging_rxon
.flags
&= ~RXON_FLG_CCK_MSK
;
979 * initialize rxon structure with default values from eeprom
981 void iwl_connection_init_rx_config(struct iwl_priv
*priv
, int mode
)
983 const struct iwl_channel_info
*ch_info
;
985 memset(&priv
->staging_rxon
, 0, sizeof(priv
->staging_rxon
));
988 case NL80211_IFTYPE_AP
:
989 priv
->staging_rxon
.dev_type
= RXON_DEV_TYPE_AP
;
992 case NL80211_IFTYPE_STATION
:
993 priv
->staging_rxon
.dev_type
= RXON_DEV_TYPE_ESS
;
994 priv
->staging_rxon
.filter_flags
= RXON_FILTER_ACCEPT_GRP_MSK
;
997 case NL80211_IFTYPE_ADHOC
:
998 priv
->staging_rxon
.dev_type
= RXON_DEV_TYPE_IBSS
;
999 priv
->staging_rxon
.flags
= RXON_FLG_SHORT_PREAMBLE_MSK
;
1000 priv
->staging_rxon
.filter_flags
= RXON_FILTER_BCON_AWARE_MSK
|
1001 RXON_FILTER_ACCEPT_GRP_MSK
;
1005 IWL_ERR(priv
, "Unsupported interface type %d\n", mode
);
1010 /* TODO: Figure out when short_preamble would be set and cache from
1012 if (!hw_to_local(priv
->hw
)->short_preamble
)
1013 priv
->staging_rxon
.flags
&= ~RXON_FLG_SHORT_PREAMBLE_MSK
;
1015 priv
->staging_rxon
.flags
|= RXON_FLG_SHORT_PREAMBLE_MSK
;
1018 ch_info
= iwl_get_channel_info(priv
, priv
->band
,
1019 le16_to_cpu(priv
->active_rxon
.channel
));
1022 ch_info
= &priv
->channel_info
[0];
1024 priv
->staging_rxon
.channel
= cpu_to_le16(ch_info
->channel
);
1025 priv
->band
= ch_info
->band
;
1027 iwl_set_flags_for_band(priv
, priv
->band
);
1029 priv
->staging_rxon
.ofdm_basic_rates
=
1030 (IWL_OFDM_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
1031 priv
->staging_rxon
.cck_basic_rates
=
1032 (IWL_CCK_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
1034 /* clear both MIX and PURE40 mode flag */
1035 priv
->staging_rxon
.flags
&= ~(RXON_FLG_CHANNEL_MODE_MIXED
|
1036 RXON_FLG_CHANNEL_MODE_PURE_40
);
1037 memcpy(priv
->staging_rxon
.node_addr
, priv
->mac_addr
, ETH_ALEN
);
1038 memcpy(priv
->staging_rxon
.wlap_bssid_addr
, priv
->mac_addr
, ETH_ALEN
);
1039 priv
->staging_rxon
.ofdm_ht_single_stream_basic_rates
= 0xff;
1040 priv
->staging_rxon
.ofdm_ht_dual_stream_basic_rates
= 0xff;
1041 priv
->staging_rxon
.ofdm_ht_triple_stream_basic_rates
= 0xff;
1043 EXPORT_SYMBOL(iwl_connection_init_rx_config
);
1045 static void iwl_set_rate(struct iwl_priv
*priv
)
1047 const struct ieee80211_supported_band
*hw
= NULL
;
1048 struct ieee80211_rate
*rate
;
1051 hw
= iwl_get_hw_mode(priv
, priv
->band
);
1053 IWL_ERR(priv
, "Failed to set rate: unable to get hw mode\n");
1057 priv
->active_rate
= 0;
1059 for (i
= 0; i
< hw
->n_bitrates
; i
++) {
1060 rate
= &(hw
->bitrates
[i
]);
1061 if (rate
->hw_value
< IWL_RATE_COUNT_LEGACY
)
1062 priv
->active_rate
|= (1 << rate
->hw_value
);
1065 IWL_DEBUG_RATE(priv
, "Set active_rate = %0x\n", priv
->active_rate
);
1067 priv
->staging_rxon
.cck_basic_rates
=
1068 (IWL_CCK_BASIC_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
1070 priv
->staging_rxon
.ofdm_basic_rates
=
1071 (IWL_OFDM_BASIC_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
1074 void iwl_rx_csa(struct iwl_priv
*priv
, struct iwl_rx_mem_buffer
*rxb
)
1076 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1077 struct iwl_rxon_cmd
*rxon
= (void *)&priv
->active_rxon
;
1078 struct iwl_csa_notification
*csa
= &(pkt
->u
.csa_notif
);
1080 if (priv
->switch_rxon
.switch_in_progress
) {
1081 if (!le32_to_cpu(csa
->status
) &&
1082 (csa
->channel
== priv
->switch_rxon
.channel
)) {
1083 rxon
->channel
= csa
->channel
;
1084 priv
->staging_rxon
.channel
= csa
->channel
;
1085 IWL_DEBUG_11H(priv
, "CSA notif: channel %d\n",
1086 le16_to_cpu(csa
->channel
));
1088 IWL_ERR(priv
, "CSA notif (fail) : channel %d\n",
1089 le16_to_cpu(csa
->channel
));
1091 priv
->switch_rxon
.switch_in_progress
= false;
1094 EXPORT_SYMBOL(iwl_rx_csa
);
#ifdef CONFIG_IWLWIFI_DEBUG
/* Dump the staging RXON command, field by field, to the radio debug log. */
void iwl_print_rx_config_cmd(struct iwl_priv *priv)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_print_rx_config_cmd);
#endif
1118 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1120 void iwl_irq_handle_error(struct iwl_priv
*priv
)
1122 /* Set the FW error flag -- cleared on iwl_down */
1123 set_bit(STATUS_FW_ERROR
, &priv
->status
);
1125 /* Cancel currently queued command. */
1126 clear_bit(STATUS_HCMD_ACTIVE
, &priv
->status
);
1128 priv
->cfg
->ops
->lib
->dump_nic_error_log(priv
);
1129 if (priv
->cfg
->ops
->lib
->dump_csr
)
1130 priv
->cfg
->ops
->lib
->dump_csr(priv
);
1131 if (priv
->cfg
->ops
->lib
->dump_fh
)
1132 priv
->cfg
->ops
->lib
->dump_fh(priv
, NULL
, false);
1133 priv
->cfg
->ops
->lib
->dump_nic_event_log(priv
, false, NULL
, false);
1134 #ifdef CONFIG_IWLWIFI_DEBUG
1135 if (iwl_get_debug_level(priv
) & IWL_DL_FW_ERRORS
)
1136 iwl_print_rx_config_cmd(priv
);
1139 wake_up_interruptible(&priv
->wait_command_queue
);
1141 /* Keep the restart process from trying to send host
1142 * commands by clearing the INIT status bit */
1143 clear_bit(STATUS_READY
, &priv
->status
);
1145 if (!test_bit(STATUS_EXIT_PENDING
, &priv
->status
)) {
1146 IWL_DEBUG(priv
, IWL_DL_FW_ERRORS
,
1147 "Restarting adapter due to uCode error.\n");
1149 if (priv
->cfg
->mod_params
->restart_fw
)
1150 queue_work(priv
->workqueue
, &priv
->restart
);
1153 EXPORT_SYMBOL(iwl_irq_handle_error
);
1155 static int iwl_apm_stop_master(struct iwl_priv
*priv
)
1159 /* stop device's busmaster DMA activity */
1160 iwl_set_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_STOP_MASTER
);
1162 ret
= iwl_poll_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_MASTER_DISABLED
,
1163 CSR_RESET_REG_FLAG_MASTER_DISABLED
, 100);
1165 IWL_WARN(priv
, "Master Disable Timed Out, 100 usec\n");
1167 IWL_DEBUG_INFO(priv
, "stop master\n");
1172 void iwl_apm_stop(struct iwl_priv
*priv
)
1174 IWL_DEBUG_INFO(priv
, "Stop card, put in low power state\n");
1176 /* Stop device's DMA activity */
1177 iwl_apm_stop_master(priv
);
1179 /* Reset the entire device */
1180 iwl_set_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_SW_RESET
);
1185 * Clear "initialization complete" bit to move adapter from
1186 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1188 iwl_clear_bit(priv
, CSR_GP_CNTRL
, CSR_GP_CNTRL_REG_FLAG_INIT_DONE
);
1190 EXPORT_SYMBOL(iwl_apm_stop
);
1194 * Start up NIC's basic functionality after it has been reset
1195 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1196 * NOTE: This does not load uCode nor start the embedded processor
1198 int iwl_apm_init(struct iwl_priv
*priv
)
1203 IWL_DEBUG_INFO(priv
, "Init card's basic functions\n");
1206 * Use "set_bit" below rather than "write", to preserve any hardware
1207 * bits already set by default after reset.
1210 /* Disable L0S exit timer (platform NMI Work/Around) */
1211 iwl_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1212 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER
);
1215 * Disable L0s without affecting L1;
1216 * don't wait for ICH L0s (ICH bug W/A)
1218 iwl_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1219 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX
);
1221 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1222 iwl_set_bit(priv
, CSR_DBG_HPET_MEM_REG
, CSR_DBG_HPET_MEM_REG_VAL
);
1225 * Enable HAP INTA (interrupt from management bus) to
1226 * wake device's PCI Express link L1a -> L0s
1227 * NOTE: This is no-op for 3945 (non-existant bit)
1229 iwl_set_bit(priv
, CSR_HW_IF_CONFIG_REG
,
1230 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A
);
1233 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1234 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1235 * If so (likely), disable L0S, so device moves directly L0->L1;
1236 * costs negligible amount of power savings.
1237 * If not (unlikely), enable L0S, so there is at least some
1238 * power savings, even without L1.
1240 if (priv
->cfg
->set_l0s
) {
1241 lctl
= iwl_pcie_link_ctl(priv
);
1242 if ((lctl
& PCI_CFG_LINK_CTRL_VAL_L1_EN
) ==
1243 PCI_CFG_LINK_CTRL_VAL_L1_EN
) {
1244 /* L1-ASPM enabled; disable(!) L0S */
1245 iwl_set_bit(priv
, CSR_GIO_REG
,
1246 CSR_GIO_REG_VAL_L0S_ENABLED
);
1247 IWL_DEBUG_POWER(priv
, "L1 Enabled; Disabling L0S\n");
1249 /* L1-ASPM disabled; enable(!) L0S */
1250 iwl_clear_bit(priv
, CSR_GIO_REG
,
1251 CSR_GIO_REG_VAL_L0S_ENABLED
);
1252 IWL_DEBUG_POWER(priv
, "L1 Disabled; Enabling L0S\n");
1256 /* Configure analog phase-lock-loop before activating to D0A */
1257 if (priv
->cfg
->pll_cfg_val
)
1258 iwl_set_bit(priv
, CSR_ANA_PLL_CFG
, priv
->cfg
->pll_cfg_val
);
1261 * Set "initialization complete" bit to move adapter from
1262 * D0U* --> D0A* (powered-up active) state.
1264 iwl_set_bit(priv
, CSR_GP_CNTRL
, CSR_GP_CNTRL_REG_FLAG_INIT_DONE
);
1267 * Wait for clock stabilization; once stabilized, access to
1268 * device-internal resources is supported, e.g. iwl_write_prph()
1269 * and accesses to uCode SRAM.
1271 ret
= iwl_poll_bit(priv
, CSR_GP_CNTRL
,
1272 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
,
1273 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
, 25000);
1275 IWL_DEBUG_INFO(priv
, "Failed to init the card\n");
1280 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1281 * BSM (Boostrap State Machine) is only in 3945 and 4965;
1282 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1283 * and don't need BSM to restore data after power-saving sleep.
1285 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1286 * do not disable clocks. This preserves any hardware bits already
1287 * set by default in "CLK_CTRL_REG" after reset.
1289 if (priv
->cfg
->use_bsm
)
1290 iwl_write_prph(priv
, APMG_CLK_EN_REG
,
1291 APMG_CLK_VAL_DMA_CLK_RQT
| APMG_CLK_VAL_BSM_CLK_RQT
);
1293 iwl_write_prph(priv
, APMG_CLK_EN_REG
,
1294 APMG_CLK_VAL_DMA_CLK_RQT
);
1297 /* Disable L1-Active */
1298 iwl_set_bits_prph(priv
, APMG_PCIDEV_STT_REG
,
1299 APMG_PCIDEV_STT_VAL_L1_ACT_DIS
);
1304 EXPORT_SYMBOL(iwl_apm_init
);
1308 void iwl_configure_filter(struct ieee80211_hw
*hw
,
1309 unsigned int changed_flags
,
1310 unsigned int *total_flags
,
1313 struct iwl_priv
*priv
= hw
->priv
;
1314 __le32
*filter_flags
= &priv
->staging_rxon
.filter_flags
;
1316 IWL_DEBUG_MAC80211(priv
, "Enter: changed: 0x%x, total: 0x%x\n",
1317 changed_flags
, *total_flags
);
1319 if (changed_flags
& (FIF_OTHER_BSS
| FIF_PROMISC_IN_BSS
)) {
1320 if (*total_flags
& (FIF_OTHER_BSS
| FIF_PROMISC_IN_BSS
))
1321 *filter_flags
|= RXON_FILTER_PROMISC_MSK
;
1323 *filter_flags
&= ~RXON_FILTER_PROMISC_MSK
;
1325 if (changed_flags
& FIF_ALLMULTI
) {
1326 if (*total_flags
& FIF_ALLMULTI
)
1327 *filter_flags
|= RXON_FILTER_ACCEPT_GRP_MSK
;
1329 *filter_flags
&= ~RXON_FILTER_ACCEPT_GRP_MSK
;
1331 if (changed_flags
& FIF_CONTROL
) {
1332 if (*total_flags
& FIF_CONTROL
)
1333 *filter_flags
|= RXON_FILTER_CTL2HOST_MSK
;
1335 *filter_flags
&= ~RXON_FILTER_CTL2HOST_MSK
;
1337 if (changed_flags
& FIF_BCN_PRBRESP_PROMISC
) {
1338 if (*total_flags
& FIF_BCN_PRBRESP_PROMISC
)
1339 *filter_flags
|= RXON_FILTER_BCON_AWARE_MSK
;
1341 *filter_flags
&= ~RXON_FILTER_BCON_AWARE_MSK
;
1344 /* We avoid iwl_commit_rxon here to commit the new filter flags
1345 * since mac80211 will call ieee80211_hw_config immediately.
1346 * (mc_list is not supported at this time). Otherwise, we need to
1347 * queue a background iwl_commit_rxon work.
1350 *total_flags
&= FIF_OTHER_BSS
| FIF_ALLMULTI
| FIF_PROMISC_IN_BSS
|
1351 FIF_BCN_PRBRESP_PROMISC
| FIF_CONTROL
;
1353 EXPORT_SYMBOL(iwl_configure_filter
);
1355 int iwl_set_hw_params(struct iwl_priv
*priv
)
1357 priv
->hw_params
.max_rxq_size
= RX_QUEUE_SIZE
;
1358 priv
->hw_params
.max_rxq_log
= RX_QUEUE_SIZE_LOG
;
1359 if (priv
->cfg
->mod_params
->amsdu_size_8K
)
1360 priv
->hw_params
.rx_page_order
= get_order(IWL_RX_BUF_SIZE_8K
);
1362 priv
->hw_params
.rx_page_order
= get_order(IWL_RX_BUF_SIZE_4K
);
1364 priv
->hw_params
.max_beacon_itrvl
= IWL_MAX_UCODE_BEACON_INTERVAL
;
1366 if (priv
->cfg
->mod_params
->disable_11n
)
1367 priv
->cfg
->sku
&= ~IWL_SKU_N
;
1369 /* Device-specific setup */
1370 return priv
->cfg
->ops
->lib
->set_hw_params(priv
);
1372 EXPORT_SYMBOL(iwl_set_hw_params
);
1374 int iwl_set_tx_power(struct iwl_priv
*priv
, s8 tx_power
, bool force
)
1377 s8 prev_tx_power
= priv
->tx_power_user_lmt
;
1379 if (tx_power
< IWL_TX_POWER_TARGET_POWER_MIN
) {
1380 IWL_WARN(priv
, "Requested user TXPOWER %d below lower limit %d.\n",
1382 IWL_TX_POWER_TARGET_POWER_MIN
);
1386 if (tx_power
> priv
->tx_power_device_lmt
) {
1388 "Requested user TXPOWER %d above upper limit %d.\n",
1389 tx_power
, priv
->tx_power_device_lmt
);
1393 if (priv
->tx_power_user_lmt
!= tx_power
)
1396 /* if nic is not up don't send command */
1397 if (iwl_is_ready_rf(priv
)) {
1398 priv
->tx_power_user_lmt
= tx_power
;
1399 if (force
&& priv
->cfg
->ops
->lib
->send_tx_power
)
1400 ret
= priv
->cfg
->ops
->lib
->send_tx_power(priv
);
1401 else if (!priv
->cfg
->ops
->lib
->send_tx_power
)
1404 * if fail to set tx_power, restore the orig. tx power
1407 priv
->tx_power_user_lmt
= prev_tx_power
;
1411 * Even this is an async host command, the command
1412 * will always report success from uCode
1413 * So once driver can placing the command into the queue
1414 * successfully, driver can use priv->tx_power_user_lmt
1415 * to reflect the current tx power
1419 EXPORT_SYMBOL(iwl_set_tx_power
);
1421 irqreturn_t
iwl_isr_legacy(int irq
, void *data
)
1423 struct iwl_priv
*priv
= data
;
1424 u32 inta
, inta_mask
;
1426 unsigned long flags
;
1430 spin_lock_irqsave(&priv
->lock
, flags
);
1432 /* Disable (but don't clear!) interrupts here to avoid
1433 * back-to-back ISRs and sporadic interrupts from our NIC.
1434 * If we have something to service, the tasklet will re-enable ints.
1435 * If we *don't* have something, we'll re-enable before leaving here. */
1436 inta_mask
= iwl_read32(priv
, CSR_INT_MASK
); /* just for debug */
1437 iwl_write32(priv
, CSR_INT_MASK
, 0x00000000);
1439 /* Discover which interrupts are active/pending */
1440 inta
= iwl_read32(priv
, CSR_INT
);
1441 inta_fh
= iwl_read32(priv
, CSR_FH_INT_STATUS
);
1443 /* Ignore interrupt if there's nothing in NIC to service.
1444 * This may be due to IRQ shared with another device,
1445 * or due to sporadic interrupts thrown from our NIC. */
1446 if (!inta
&& !inta_fh
) {
1447 IWL_DEBUG_ISR(priv
, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1451 if ((inta
== 0xFFFFFFFF) || ((inta
& 0xFFFFFFF0) == 0xa5a5a5a0)) {
1452 /* Hardware disappeared. It might have already raised
1454 IWL_WARN(priv
, "HARDWARE GONE?? INTA == 0x%08x\n", inta
);
1458 IWL_DEBUG_ISR(priv
, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1459 inta
, inta_mask
, inta_fh
);
1461 inta
&= ~CSR_INT_BIT_SCD
;
1463 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1464 if (likely(inta
|| inta_fh
))
1465 tasklet_schedule(&priv
->irq_tasklet
);
1468 spin_unlock_irqrestore(&priv
->lock
, flags
);
1472 /* re-enable interrupts here since we don't have anything to service. */
1473 /* only Re-enable if diabled by irq */
1474 if (test_bit(STATUS_INT_ENABLED
, &priv
->status
))
1475 iwl_enable_interrupts(priv
);
1476 spin_unlock_irqrestore(&priv
->lock
, flags
);
1479 EXPORT_SYMBOL(iwl_isr_legacy
);
1481 int iwl_send_bt_config(struct iwl_priv
*priv
)
1483 struct iwl_bt_cmd bt_cmd
= {
1484 .lead_time
= BT_LEAD_TIME_DEF
,
1485 .max_kill
= BT_MAX_KILL_DEF
,
1490 if (!bt_coex_active
)
1491 bt_cmd
.flags
= BT_COEX_DISABLE
;
1493 bt_cmd
.flags
= BT_COEX_ENABLE
;
1495 IWL_DEBUG_INFO(priv
, "BT coex %s\n",
1496 (bt_cmd
.flags
== BT_COEX_DISABLE
) ? "disable" : "active");
1498 return iwl_send_cmd_pdu(priv
, REPLY_BT_CONFIG
,
1499 sizeof(struct iwl_bt_cmd
), &bt_cmd
);
1501 EXPORT_SYMBOL(iwl_send_bt_config
);
1503 int iwl_send_statistics_request(struct iwl_priv
*priv
, u8 flags
, bool clear
)
1505 struct iwl_statistics_cmd statistics_cmd
= {
1506 .configuration_flags
=
1507 clear
? IWL_STATS_CONF_CLEAR_STATS
: 0,
1510 if (flags
& CMD_ASYNC
)
1511 return iwl_send_cmd_pdu_async(priv
, REPLY_STATISTICS_CMD
,
1512 sizeof(struct iwl_statistics_cmd
),
1513 &statistics_cmd
, NULL
);
1515 return iwl_send_cmd_pdu(priv
, REPLY_STATISTICS_CMD
,
1516 sizeof(struct iwl_statistics_cmd
),
1519 EXPORT_SYMBOL(iwl_send_statistics_request
);
1522 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
1523 * using sample data 100 bytes apart. If these sample points are good,
1524 * it's a pretty good bet that everything between them is good, too.
1526 static int iwlcore_verify_inst_sparse(struct iwl_priv
*priv
, __le32
*image
, u32 len
)
1533 IWL_DEBUG_INFO(priv
, "ucode inst image size is %u\n", len
);
1535 for (i
= 0; i
< len
; i
+= 100, image
+= 100/sizeof(u32
)) {
1536 /* read data comes through single port, auto-incr addr */
1537 /* NOTE: Use the debugless read so we don't flood kernel log
1538 * if IWL_DL_IO is set */
1539 iwl_write_direct32(priv
, HBUS_TARG_MEM_RADDR
,
1540 i
+ IWL49_RTC_INST_LOWER_BOUND
);
1541 val
= _iwl_read_direct32(priv
, HBUS_TARG_MEM_RDAT
);
1542 if (val
!= le32_to_cpu(*image
)) {
1554 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
1555 * looking at all data.
1557 static int iwl_verify_inst_full(struct iwl_priv
*priv
, __le32
*image
,
1565 IWL_DEBUG_INFO(priv
, "ucode inst image size is %u\n", len
);
1567 iwl_write_direct32(priv
, HBUS_TARG_MEM_RADDR
,
1568 IWL49_RTC_INST_LOWER_BOUND
);
1571 for (; len
> 0; len
-= sizeof(u32
), image
++) {
1572 /* read data comes through single port, auto-incr addr */
1573 /* NOTE: Use the debugless read so we don't flood kernel log
1574 * if IWL_DL_IO is set */
1575 val
= _iwl_read_direct32(priv
, HBUS_TARG_MEM_RDAT
);
1576 if (val
!= le32_to_cpu(*image
)) {
1577 IWL_ERR(priv
, "uCode INST section is invalid at "
1578 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1579 save_len
- len
, val
, le32_to_cpu(*image
));
1588 IWL_DEBUG_INFO(priv
,
1589 "ucode image in INSTRUCTION memory is good\n");
1595 * iwl_verify_ucode - determine which instruction image is in SRAM,
1596 * and verify its contents
1598 int iwl_verify_ucode(struct iwl_priv
*priv
)
1605 image
= (__le32
*)priv
->ucode_boot
.v_addr
;
1606 len
= priv
->ucode_boot
.len
;
1607 ret
= iwlcore_verify_inst_sparse(priv
, image
, len
);
1609 IWL_DEBUG_INFO(priv
, "Bootstrap uCode is good in inst SRAM\n");
1613 /* Try initialize */
1614 image
= (__le32
*)priv
->ucode_init
.v_addr
;
1615 len
= priv
->ucode_init
.len
;
1616 ret
= iwlcore_verify_inst_sparse(priv
, image
, len
);
1618 IWL_DEBUG_INFO(priv
, "Initialize uCode is good in inst SRAM\n");
1622 /* Try runtime/protocol */
1623 image
= (__le32
*)priv
->ucode_code
.v_addr
;
1624 len
= priv
->ucode_code
.len
;
1625 ret
= iwlcore_verify_inst_sparse(priv
, image
, len
);
1627 IWL_DEBUG_INFO(priv
, "Runtime uCode is good in inst SRAM\n");
1631 IWL_ERR(priv
, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1633 /* Since nothing seems to match, show first several data entries in
1634 * instruction SRAM, so maybe visual inspection will give a clue.
1635 * Selection of bootstrap image (vs. other images) is arbitrary. */
1636 image
= (__le32
*)priv
->ucode_boot
.v_addr
;
1637 len
= priv
->ucode_boot
.len
;
1638 ret
= iwl_verify_inst_full(priv
, image
, len
);
1642 EXPORT_SYMBOL(iwl_verify_ucode
);
1645 void iwl_rf_kill_ct_config(struct iwl_priv
*priv
)
1647 struct iwl_ct_kill_config cmd
;
1648 struct iwl_ct_kill_throttling_config adv_cmd
;
1649 unsigned long flags
;
1652 spin_lock_irqsave(&priv
->lock
, flags
);
1653 iwl_write32(priv
, CSR_UCODE_DRV_GP1_CLR
,
1654 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT
);
1655 spin_unlock_irqrestore(&priv
->lock
, flags
);
1656 priv
->thermal_throttle
.ct_kill_toggle
= false;
1658 if (priv
->cfg
->support_ct_kill_exit
) {
1659 adv_cmd
.critical_temperature_enter
=
1660 cpu_to_le32(priv
->hw_params
.ct_kill_threshold
);
1661 adv_cmd
.critical_temperature_exit
=
1662 cpu_to_le32(priv
->hw_params
.ct_kill_exit_threshold
);
1664 ret
= iwl_send_cmd_pdu(priv
, REPLY_CT_KILL_CONFIG_CMD
,
1665 sizeof(adv_cmd
), &adv_cmd
);
1667 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1669 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
1671 "critical temperature enter is %d,"
1673 priv
->hw_params
.ct_kill_threshold
,
1674 priv
->hw_params
.ct_kill_exit_threshold
);
1676 cmd
.critical_temperature_R
=
1677 cpu_to_le32(priv
->hw_params
.ct_kill_threshold
);
1679 ret
= iwl_send_cmd_pdu(priv
, REPLY_CT_KILL_CONFIG_CMD
,
1682 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1684 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
1686 "critical temperature is %d\n",
1687 priv
->hw_params
.ct_kill_threshold
);
1690 EXPORT_SYMBOL(iwl_rf_kill_ct_config
);
1696 * Use: Sets the device's internal card state to enable, disable, or halt
1698 * When in the 'enable' state the card operates as normal.
1699 * When in the 'disable' state, the card enters into a low power mode.
1700 * When in the 'halt' state, the card is shut down and must be fully
1701 * restarted to come back on.
1703 int iwl_send_card_state(struct iwl_priv
*priv
, u32 flags
, u8 meta_flag
)
1705 struct iwl_host_cmd cmd
= {
1706 .id
= REPLY_CARD_STATE_CMD
,
1712 return iwl_send_cmd(priv
, &cmd
);
/* Log a power-management sleep notification (debug builds only). */
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1727 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv
*priv
,
1728 struct iwl_rx_mem_buffer
*rxb
)
1730 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1731 u32 len
= le32_to_cpu(pkt
->len_n_flags
) & FH_RSCSR_FRAME_SIZE_MSK
;
1732 IWL_DEBUG_RADIO(priv
, "Dumping %d bytes of unhandled "
1733 "notification for %s:\n", len
,
1734 get_cmd_string(pkt
->hdr
.cmd
));
1735 iwl_print_hex_dump(priv
, IWL_DL_RADIO
, pkt
->u
.raw
, len
);
1737 EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif
);
1739 void iwl_rx_reply_error(struct iwl_priv
*priv
,
1740 struct iwl_rx_mem_buffer
*rxb
)
1742 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1744 IWL_ERR(priv
, "Error Reply type 0x%08X cmd %s (0x%02X) "
1745 "seq 0x%04X ser 0x%08X\n",
1746 le32_to_cpu(pkt
->u
.err_resp
.error_type
),
1747 get_cmd_string(pkt
->u
.err_resp
.cmd_id
),
1748 pkt
->u
.err_resp
.cmd_id
,
1749 le16_to_cpu(pkt
->u
.err_resp
.bad_cmd_seq_num
),
1750 le32_to_cpu(pkt
->u
.err_resp
.error_info
));
1752 EXPORT_SYMBOL(iwl_rx_reply_error
);
1754 void iwl_clear_isr_stats(struct iwl_priv
*priv
)
1756 memset(&priv
->isr_stats
, 0, sizeof(priv
->isr_stats
));
1759 int iwl_mac_conf_tx(struct ieee80211_hw
*hw
, u16 queue
,
1760 const struct ieee80211_tx_queue_params
*params
)
1762 struct iwl_priv
*priv
= hw
->priv
;
1763 unsigned long flags
;
1766 IWL_DEBUG_MAC80211(priv
, "enter\n");
1768 if (!iwl_is_ready_rf(priv
)) {
1769 IWL_DEBUG_MAC80211(priv
, "leave - RF not ready\n");
1773 if (queue
>= AC_NUM
) {
1774 IWL_DEBUG_MAC80211(priv
, "leave - queue >= AC_NUM %d\n", queue
);
1778 q
= AC_NUM
- 1 - queue
;
1780 spin_lock_irqsave(&priv
->lock
, flags
);
1782 priv
->qos_data
.def_qos_parm
.ac
[q
].cw_min
= cpu_to_le16(params
->cw_min
);
1783 priv
->qos_data
.def_qos_parm
.ac
[q
].cw_max
= cpu_to_le16(params
->cw_max
);
1784 priv
->qos_data
.def_qos_parm
.ac
[q
].aifsn
= params
->aifs
;
1785 priv
->qos_data
.def_qos_parm
.ac
[q
].edca_txop
=
1786 cpu_to_le16((params
->txop
* 32));
1788 priv
->qos_data
.def_qos_parm
.ac
[q
].reserved1
= 0;
1790 spin_unlock_irqrestore(&priv
->lock
, flags
);
1792 IWL_DEBUG_MAC80211(priv
, "leave\n");
1795 EXPORT_SYMBOL(iwl_mac_conf_tx
);
1797 static void iwl_ht_conf(struct iwl_priv
*priv
,
1798 struct ieee80211_bss_conf
*bss_conf
)
1800 struct iwl_ht_config
*ht_conf
= &priv
->current_ht_config
;
1801 struct ieee80211_sta
*sta
;
1803 IWL_DEBUG_MAC80211(priv
, "enter: \n");
1805 if (!ht_conf
->is_ht
)
1808 ht_conf
->ht_protection
=
1809 bss_conf
->ht_operation_mode
& IEEE80211_HT_OP_MODE_PROTECTION
;
1810 ht_conf
->non_GF_STA_present
=
1811 !!(bss_conf
->ht_operation_mode
& IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT
);
1813 ht_conf
->single_chain_sufficient
= false;
1815 switch (priv
->iw_mode
) {
1816 case NL80211_IFTYPE_STATION
:
1818 sta
= ieee80211_find_sta(priv
->vif
, priv
->bssid
);
1820 struct ieee80211_sta_ht_cap
*ht_cap
= &sta
->ht_cap
;
1823 maxstreams
= (ht_cap
->mcs
.tx_params
&
1824 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK
)
1825 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT
;
1828 if ((ht_cap
->mcs
.rx_mask
[1] == 0) &&
1829 (ht_cap
->mcs
.rx_mask
[2] == 0))
1830 ht_conf
->single_chain_sufficient
= true;
1831 if (maxstreams
<= 1)
1832 ht_conf
->single_chain_sufficient
= true;
1835 * If at all, this can only happen through a race
1836 * when the AP disconnects us while we're still
1837 * setting up the connection, in that case mac80211
1838 * will soon tell us about that.
1840 ht_conf
->single_chain_sufficient
= true;
1844 case NL80211_IFTYPE_ADHOC
:
1845 ht_conf
->single_chain_sufficient
= true;
1851 IWL_DEBUG_MAC80211(priv
, "leave\n");
1854 static inline void iwl_set_no_assoc(struct iwl_priv
*priv
)
1857 iwl_led_disassociate(priv
);
1859 * inform the ucode that there is no longer an
1860 * association and that no more packets should be
1863 priv
->staging_rxon
.filter_flags
&=
1864 ~RXON_FILTER_ASSOC_MSK
;
1865 priv
->staging_rxon
.assoc_id
= 0;
1866 iwlcore_commit_rxon(priv
);
1869 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
1870 void iwl_bss_info_changed(struct ieee80211_hw
*hw
,
1871 struct ieee80211_vif
*vif
,
1872 struct ieee80211_bss_conf
*bss_conf
,
1875 struct iwl_priv
*priv
= hw
->priv
;
1878 IWL_DEBUG_MAC80211(priv
, "changes = 0x%X\n", changes
);
1880 if (!iwl_is_alive(priv
))
1883 mutex_lock(&priv
->mutex
);
1885 if (changes
& BSS_CHANGED_BEACON
&&
1886 priv
->iw_mode
== NL80211_IFTYPE_AP
) {
1887 dev_kfree_skb(priv
->ibss_beacon
);
1888 priv
->ibss_beacon
= ieee80211_beacon_get(hw
, vif
);
1891 if (changes
& BSS_CHANGED_BEACON_INT
) {
1892 priv
->beacon_int
= bss_conf
->beacon_int
;
1893 /* TODO: in AP mode, do something to make this take effect */
1896 if (changes
& BSS_CHANGED_BSSID
) {
1897 IWL_DEBUG_MAC80211(priv
, "BSSID %pM\n", bss_conf
->bssid
);
1900 * If there is currently a HW scan going on in the
1901 * background then we need to cancel it else the RXON
1902 * below/in post_associate will fail.
1904 if (iwl_scan_cancel_timeout(priv
, 100)) {
1905 IWL_WARN(priv
, "Aborted scan still in progress after 100ms\n");
1906 IWL_DEBUG_MAC80211(priv
, "leaving - scan abort failed.\n");
1907 mutex_unlock(&priv
->mutex
);
1911 /* mac80211 only sets assoc when in STATION mode */
1912 if (priv
->iw_mode
== NL80211_IFTYPE_ADHOC
||
1914 memcpy(priv
->staging_rxon
.bssid_addr
,
1915 bss_conf
->bssid
, ETH_ALEN
);
1917 /* currently needed in a few places */
1918 memcpy(priv
->bssid
, bss_conf
->bssid
, ETH_ALEN
);
1920 priv
->staging_rxon
.filter_flags
&=
1921 ~RXON_FILTER_ASSOC_MSK
;
1927 * This needs to be after setting the BSSID in case
1928 * mac80211 decides to do both changes at once because
1929 * it will invoke post_associate.
1931 if (priv
->iw_mode
== NL80211_IFTYPE_ADHOC
&&
1932 changes
& BSS_CHANGED_BEACON
) {
1933 struct sk_buff
*beacon
= ieee80211_beacon_get(hw
, vif
);
1936 iwl_mac_beacon_update(hw
, beacon
);
1939 if (changes
& BSS_CHANGED_ERP_PREAMBLE
) {
1940 IWL_DEBUG_MAC80211(priv
, "ERP_PREAMBLE %d\n",
1941 bss_conf
->use_short_preamble
);
1942 if (bss_conf
->use_short_preamble
)
1943 priv
->staging_rxon
.flags
|= RXON_FLG_SHORT_PREAMBLE_MSK
;
1945 priv
->staging_rxon
.flags
&= ~RXON_FLG_SHORT_PREAMBLE_MSK
;
1948 if (changes
& BSS_CHANGED_ERP_CTS_PROT
) {
1949 IWL_DEBUG_MAC80211(priv
, "ERP_CTS %d\n", bss_conf
->use_cts_prot
);
1950 if (bss_conf
->use_cts_prot
&& (priv
->band
!= IEEE80211_BAND_5GHZ
))
1951 priv
->staging_rxon
.flags
|= RXON_FLG_TGG_PROTECT_MSK
;
1953 priv
->staging_rxon
.flags
&= ~RXON_FLG_TGG_PROTECT_MSK
;
1956 if (changes
& BSS_CHANGED_BASIC_RATES
) {
1957 /* XXX use this information
1959 * To do that, remove code from iwl_set_rate() and put something
1963 priv->staging_rxon.ofdm_basic_rates =
1964 bss_conf->basic_rates;
1966 priv->staging_rxon.ofdm_basic_rates =
1967 bss_conf->basic_rates >> 4;
1968 priv->staging_rxon.cck_basic_rates =
1969 bss_conf->basic_rates & 0xF;
1973 if (changes
& BSS_CHANGED_HT
) {
1974 iwl_ht_conf(priv
, bss_conf
);
1976 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
1977 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
);
1980 if (changes
& BSS_CHANGED_ASSOC
) {
1981 IWL_DEBUG_MAC80211(priv
, "ASSOC %d\n", bss_conf
->assoc
);
1982 if (bss_conf
->assoc
) {
1983 priv
->assoc_id
= bss_conf
->aid
;
1984 priv
->beacon_int
= bss_conf
->beacon_int
;
1985 priv
->timestamp
= bss_conf
->timestamp
;
1986 priv
->assoc_capability
= bss_conf
->assoc_capability
;
1988 iwl_led_associate(priv
);
1991 * We have just associated, don't start scan too early
1992 * leave time for EAPOL exchange to complete.
1994 * XXX: do this in mac80211
1996 priv
->next_scan_jiffies
= jiffies
+
1997 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC
;
1998 if (!iwl_is_rfkill(priv
))
1999 priv
->cfg
->ops
->lib
->post_associate(priv
);
2001 iwl_set_no_assoc(priv
);
2004 if (changes
&& iwl_is_associated(priv
) && priv
->assoc_id
) {
2005 IWL_DEBUG_MAC80211(priv
, "Changes (%#x) while associated\n",
2007 ret
= iwl_send_rxon_assoc(priv
);
2009 /* Sync active_rxon with latest change. */
2010 memcpy((void *)&priv
->active_rxon
,
2011 &priv
->staging_rxon
,
2012 sizeof(struct iwl_rxon_cmd
));
2016 if (changes
& BSS_CHANGED_BEACON_ENABLED
) {
2017 if (vif
->bss_conf
.enable_beacon
) {
2018 memcpy(priv
->staging_rxon
.bssid_addr
,
2019 bss_conf
->bssid
, ETH_ALEN
);
2020 memcpy(priv
->bssid
, bss_conf
->bssid
, ETH_ALEN
);
2021 iwlcore_config_ap(priv
);
2023 iwl_set_no_assoc(priv
);
2026 mutex_unlock(&priv
->mutex
);
2028 IWL_DEBUG_MAC80211(priv
, "leave\n");
2030 EXPORT_SYMBOL(iwl_bss_info_changed
);
2032 int iwl_mac_beacon_update(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
2034 struct iwl_priv
*priv
= hw
->priv
;
2035 unsigned long flags
;
2038 IWL_DEBUG_MAC80211(priv
, "enter\n");
2040 if (!iwl_is_ready_rf(priv
)) {
2041 IWL_DEBUG_MAC80211(priv
, "leave - RF not ready\n");
2045 spin_lock_irqsave(&priv
->lock
, flags
);
2047 if (priv
->ibss_beacon
)
2048 dev_kfree_skb(priv
->ibss_beacon
);
2050 priv
->ibss_beacon
= skb
;
2053 timestamp
= ((struct ieee80211_mgmt
*)skb
->data
)->u
.beacon
.timestamp
;
2054 priv
->timestamp
= le64_to_cpu(timestamp
);
2056 IWL_DEBUG_MAC80211(priv
, "leave\n");
2057 spin_unlock_irqrestore(&priv
->lock
, flags
);
2059 priv
->cfg
->ops
->lib
->post_associate(priv
);
2063 EXPORT_SYMBOL(iwl_mac_beacon_update
);
2065 static int iwl_set_mode(struct iwl_priv
*priv
, struct ieee80211_vif
*vif
)
2067 iwl_connection_init_rx_config(priv
, vif
->type
);
2069 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
2070 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
);
2072 memcpy(priv
->staging_rxon
.node_addr
, priv
->mac_addr
, ETH_ALEN
);
2074 return iwlcore_commit_rxon(priv
);
2077 int iwl_mac_add_interface(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
)
2079 struct iwl_priv
*priv
= hw
->priv
;
2082 IWL_DEBUG_MAC80211(priv
, "enter: type %d\n", vif
->type
);
2084 mutex_lock(&priv
->mutex
);
2086 if (WARN_ON(!iwl_is_ready_rf(priv
))) {
2092 IWL_DEBUG_MAC80211(priv
, "leave - vif != NULL\n");
2098 priv
->iw_mode
= vif
->type
;
2100 IWL_DEBUG_MAC80211(priv
, "Set %pM\n", vif
->addr
);
2101 memcpy(priv
->mac_addr
, vif
->addr
, ETH_ALEN
);
2103 err
= iwl_set_mode(priv
, vif
);
2107 /* Add the broadcast address so we can send broadcast frames */
2108 priv
->cfg
->ops
->lib
->add_bcast_station(priv
);
2114 priv
->iw_mode
= NL80211_IFTYPE_STATION
;
2116 mutex_unlock(&priv
->mutex
);
2118 IWL_DEBUG_MAC80211(priv
, "leave\n");
2121 EXPORT_SYMBOL(iwl_mac_add_interface
);
2123 void iwl_mac_remove_interface(struct ieee80211_hw
*hw
,
2124 struct ieee80211_vif
*vif
)
2126 struct iwl_priv
*priv
= hw
->priv
;
2128 IWL_DEBUG_MAC80211(priv
, "enter\n");
2130 mutex_lock(&priv
->mutex
);
2132 iwl_clear_ucode_stations(priv
, true);
2134 if (iwl_is_ready_rf(priv
)) {
2135 iwl_scan_cancel_timeout(priv
, 100);
2136 priv
->staging_rxon
.filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
2137 iwlcore_commit_rxon(priv
);
2139 if (priv
->vif
== vif
) {
2141 memset(priv
->bssid
, 0, ETH_ALEN
);
2143 mutex_unlock(&priv
->mutex
);
2145 IWL_DEBUG_MAC80211(priv
, "leave\n");
2148 EXPORT_SYMBOL(iwl_mac_remove_interface
);
2151 * iwl_mac_config - mac80211 config callback
2153 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2154 * be set inappropriately and the driver currently sets the hardware up to
2155 * use it whenever needed.
2157 int iwl_mac_config(struct ieee80211_hw
*hw
, u32 changed
)
2159 struct iwl_priv
*priv
= hw
->priv
;
2160 const struct iwl_channel_info
*ch_info
;
2161 struct ieee80211_conf
*conf
= &hw
->conf
;
2162 struct iwl_ht_config
*ht_conf
= &priv
->current_ht_config
;
2163 unsigned long flags
= 0;
2166 int scan_active
= 0;
2168 mutex_lock(&priv
->mutex
);
2170 IWL_DEBUG_MAC80211(priv
, "enter to channel %d changed 0x%X\n",
2171 conf
->channel
->hw_value
, changed
);
2173 if (unlikely(!priv
->cfg
->mod_params
->disable_hw_scan
&&
2174 test_bit(STATUS_SCANNING
, &priv
->status
))) {
2176 IWL_DEBUG_MAC80211(priv
, "leave - scanning\n");
2179 if (changed
& (IEEE80211_CONF_CHANGE_SMPS
|
2180 IEEE80211_CONF_CHANGE_CHANNEL
)) {
2181 /* mac80211 uses static for non-HT which is what we want */
2182 priv
->current_ht_config
.smps
= conf
->smps_mode
;
2185 * Recalculate chain counts.
2187 * If monitor mode is enabled then mac80211 will
2188 * set up the SM PS mode to OFF if an HT channel is
2191 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
2192 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
);
2195 /* during scanning mac80211 will delay channel setting until
2196 * scan finish with changed = 0
2198 if (!changed
|| (changed
& IEEE80211_CONF_CHANGE_CHANNEL
)) {
2202 ch
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2203 ch_info
= iwl_get_channel_info(priv
, conf
->channel
->band
, ch
);
2204 if (!is_channel_valid(ch_info
)) {
2205 IWL_DEBUG_MAC80211(priv
, "leave - invalid channel\n");
2210 spin_lock_irqsave(&priv
->lock
, flags
);
2212 /* Configure HT40 channels */
2213 ht_conf
->is_ht
= conf_is_ht(conf
);
2214 if (ht_conf
->is_ht
) {
2215 if (conf_is_ht40_minus(conf
)) {
2216 ht_conf
->extension_chan_offset
=
2217 IEEE80211_HT_PARAM_CHA_SEC_BELOW
;
2218 ht_conf
->is_40mhz
= true;
2219 } else if (conf_is_ht40_plus(conf
)) {
2220 ht_conf
->extension_chan_offset
=
2221 IEEE80211_HT_PARAM_CHA_SEC_ABOVE
;
2222 ht_conf
->is_40mhz
= true;
2224 ht_conf
->extension_chan_offset
=
2225 IEEE80211_HT_PARAM_CHA_SEC_NONE
;
2226 ht_conf
->is_40mhz
= false;
2229 ht_conf
->is_40mhz
= false;
2230 /* Default to no protection. Protection mode will later be set
2231 * from BSS config in iwl_ht_conf */
2232 ht_conf
->ht_protection
= IEEE80211_HT_OP_MODE_PROTECTION_NONE
;
2234 /* if we are switching from ht to 2.4 clear flags
2235 * from any ht related info since 2.4 does not
2237 if ((le16_to_cpu(priv
->staging_rxon
.channel
) != ch
))
2238 priv
->staging_rxon
.flags
= 0;
2240 iwl_set_rxon_channel(priv
, conf
->channel
);
2241 iwl_set_rxon_ht(priv
, ht_conf
);
2243 iwl_set_flags_for_band(priv
, conf
->channel
->band
);
2244 spin_unlock_irqrestore(&priv
->lock
, flags
);
2245 if (iwl_is_associated(priv
) &&
2246 (le16_to_cpu(priv
->active_rxon
.channel
) != ch
) &&
2247 priv
->cfg
->ops
->lib
->set_channel_switch
) {
2250 * at this point, staging_rxon has the
2251 * configuration for channel switch
2253 ret
= priv
->cfg
->ops
->lib
->set_channel_switch(priv
,
2256 iwl_print_rx_config_cmd(priv
);
2259 priv
->switch_rxon
.switch_in_progress
= false;
2262 /* The list of supported rates and rate mask can be different
2263 * for each band; since the band may have changed, reset
2264 * the rate mask to what mac80211 lists */
2268 if (changed
& (IEEE80211_CONF_CHANGE_PS
|
2269 IEEE80211_CONF_CHANGE_IDLE
)) {
2270 ret
= iwl_power_update_mode(priv
, false);
2272 IWL_DEBUG_MAC80211(priv
, "Error setting sleep level\n");
2275 if (changed
& IEEE80211_CONF_CHANGE_POWER
) {
2276 IWL_DEBUG_MAC80211(priv
, "TX Power old=%d new=%d\n",
2277 priv
->tx_power_user_lmt
, conf
->power_level
);
2279 iwl_set_tx_power(priv
, conf
->power_level
, false);
2282 if (changed
& IEEE80211_CONF_CHANGE_QOS
) {
2283 bool qos_active
= !!(conf
->flags
& IEEE80211_CONF_QOS
);
2285 spin_lock_irqsave(&priv
->lock
, flags
);
2286 priv
->qos_data
.qos_active
= qos_active
;
2287 iwl_update_qos(priv
);
2288 spin_unlock_irqrestore(&priv
->lock
, flags
);
2291 if (!iwl_is_ready(priv
)) {
2292 IWL_DEBUG_MAC80211(priv
, "leave - not ready\n");
2299 if (memcmp(&priv
->active_rxon
,
2300 &priv
->staging_rxon
, sizeof(priv
->staging_rxon
)))
2301 iwlcore_commit_rxon(priv
);
2303 IWL_DEBUG_INFO(priv
, "Not re-sending same RXON configuration.\n");
2307 IWL_DEBUG_MAC80211(priv
, "leave\n");
2308 mutex_unlock(&priv
->mutex
);
2311 EXPORT_SYMBOL(iwl_mac_config
);
2313 void iwl_mac_reset_tsf(struct ieee80211_hw
*hw
)
2315 struct iwl_priv
*priv
= hw
->priv
;
2316 unsigned long flags
;
2318 mutex_lock(&priv
->mutex
);
2319 IWL_DEBUG_MAC80211(priv
, "enter\n");
2321 spin_lock_irqsave(&priv
->lock
, flags
);
2322 memset(&priv
->current_ht_config
, 0, sizeof(struct iwl_ht_config
));
2323 spin_unlock_irqrestore(&priv
->lock
, flags
);
2325 spin_lock_irqsave(&priv
->lock
, flags
);
2327 priv
->assoc_capability
= 0;
2329 /* new association get rid of ibss beacon skb */
2330 if (priv
->ibss_beacon
)
2331 dev_kfree_skb(priv
->ibss_beacon
);
2333 priv
->ibss_beacon
= NULL
;
2335 priv
->beacon_int
= priv
->vif
->bss_conf
.beacon_int
;
2336 priv
->timestamp
= 0;
2338 spin_unlock_irqrestore(&priv
->lock
, flags
);
2340 if (!iwl_is_ready_rf(priv
)) {
2341 IWL_DEBUG_MAC80211(priv
, "leave - not ready\n");
2342 mutex_unlock(&priv
->mutex
);
2346 /* we are restarting association process
2347 * clear RXON_FILTER_ASSOC_MSK bit
2349 iwl_scan_cancel_timeout(priv
, 100);
2350 priv
->staging_rxon
.filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
2351 iwlcore_commit_rxon(priv
);
2355 mutex_unlock(&priv
->mutex
);
2357 IWL_DEBUG_MAC80211(priv
, "leave\n");
2359 EXPORT_SYMBOL(iwl_mac_reset_tsf
);
2361 int iwl_alloc_txq_mem(struct iwl_priv
*priv
)
2364 priv
->txq
= kzalloc(
2365 sizeof(struct iwl_tx_queue
) * priv
->cfg
->num_of_queues
,
2368 IWL_ERR(priv
, "Not enough memory for txq \n");
2373 EXPORT_SYMBOL(iwl_alloc_txq_mem
);
2375 void iwl_free_txq_mem(struct iwl_priv
*priv
)
2380 EXPORT_SYMBOL(iwl_free_txq_mem
);
2382 int iwl_send_wimax_coex(struct iwl_priv
*priv
)
2384 struct iwl_wimax_coex_cmd
uninitialized_var(coex_cmd
);
2386 if (priv
->cfg
->support_wimax_coexist
) {
2387 /* UnMask wake up src at associated sleep */
2388 coex_cmd
.flags
|= COEX_FLAGS_ASSOC_WA_UNMASK_MSK
;
2390 /* UnMask wake up src at unassociated sleep */
2391 coex_cmd
.flags
|= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK
;
2392 memcpy(coex_cmd
.sta_prio
, cu_priorities
,
2393 sizeof(struct iwl_wimax_coex_event_entry
) *
2394 COEX_NUM_OF_EVENTS
);
2396 /* enabling the coexistence feature */
2397 coex_cmd
.flags
|= COEX_FLAGS_COEX_ENABLE_MSK
;
2399 /* enabling the priorities tables */
2400 coex_cmd
.flags
|= COEX_FLAGS_STA_TABLE_VALID_MSK
;
2402 /* coexistence is disabled */
2403 memset(&coex_cmd
, 0, sizeof(coex_cmd
));
2405 return iwl_send_cmd_pdu(priv
, COEX_PRIORITY_TABLE_CMD
,
2406 sizeof(coex_cmd
), &coex_cmd
);
2408 EXPORT_SYMBOL(iwl_send_wimax_coex
);
2410 #ifdef CONFIG_IWLWIFI_DEBUGFS
2412 #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
2414 void iwl_reset_traffic_log(struct iwl_priv
*priv
)
2416 priv
->tx_traffic_idx
= 0;
2417 priv
->rx_traffic_idx
= 0;
2418 if (priv
->tx_traffic
)
2419 memset(priv
->tx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
2420 if (priv
->rx_traffic
)
2421 memset(priv
->rx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
2424 int iwl_alloc_traffic_mem(struct iwl_priv
*priv
)
2426 u32 traffic_size
= IWL_TRAFFIC_DUMP_SIZE
;
2428 if (iwl_debug_level
& IWL_DL_TX
) {
2429 if (!priv
->tx_traffic
) {
2431 kzalloc(traffic_size
, GFP_KERNEL
);
2432 if (!priv
->tx_traffic
)
2436 if (iwl_debug_level
& IWL_DL_RX
) {
2437 if (!priv
->rx_traffic
) {
2439 kzalloc(traffic_size
, GFP_KERNEL
);
2440 if (!priv
->rx_traffic
)
2444 iwl_reset_traffic_log(priv
);
2447 EXPORT_SYMBOL(iwl_alloc_traffic_mem
);
2449 void iwl_free_traffic_mem(struct iwl_priv
*priv
)
2451 kfree(priv
->tx_traffic
);
2452 priv
->tx_traffic
= NULL
;
2454 kfree(priv
->rx_traffic
);
2455 priv
->rx_traffic
= NULL
;
2457 EXPORT_SYMBOL(iwl_free_traffic_mem
);
2459 void iwl_dbg_log_tx_data_frame(struct iwl_priv
*priv
,
2460 u16 length
, struct ieee80211_hdr
*header
)
2465 if (likely(!(iwl_debug_level
& IWL_DL_TX
)))
2468 if (!priv
->tx_traffic
)
2471 fc
= header
->frame_control
;
2472 if (ieee80211_is_data(fc
)) {
2473 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
2474 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
2475 memcpy((priv
->tx_traffic
+
2476 (priv
->tx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
2478 priv
->tx_traffic_idx
=
2479 (priv
->tx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
2482 EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame
);
2484 void iwl_dbg_log_rx_data_frame(struct iwl_priv
*priv
,
2485 u16 length
, struct ieee80211_hdr
*header
)
2490 if (likely(!(iwl_debug_level
& IWL_DL_RX
)))
2493 if (!priv
->rx_traffic
)
2496 fc
= header
->frame_control
;
2497 if (ieee80211_is_data(fc
)) {
2498 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
2499 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
2500 memcpy((priv
->rx_traffic
+
2501 (priv
->rx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
2503 priv
->rx_traffic_idx
=
2504 (priv
->rx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
2507 EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame
);
2509 const char *get_mgmt_string(int cmd
)
2512 IWL_CMD(MANAGEMENT_ASSOC_REQ
);
2513 IWL_CMD(MANAGEMENT_ASSOC_RESP
);
2514 IWL_CMD(MANAGEMENT_REASSOC_REQ
);
2515 IWL_CMD(MANAGEMENT_REASSOC_RESP
);
2516 IWL_CMD(MANAGEMENT_PROBE_REQ
);
2517 IWL_CMD(MANAGEMENT_PROBE_RESP
);
2518 IWL_CMD(MANAGEMENT_BEACON
);
2519 IWL_CMD(MANAGEMENT_ATIM
);
2520 IWL_CMD(MANAGEMENT_DISASSOC
);
2521 IWL_CMD(MANAGEMENT_AUTH
);
2522 IWL_CMD(MANAGEMENT_DEAUTH
);
2523 IWL_CMD(MANAGEMENT_ACTION
);
2530 const char *get_ctrl_string(int cmd
)
2533 IWL_CMD(CONTROL_BACK_REQ
);
2534 IWL_CMD(CONTROL_BACK
);
2535 IWL_CMD(CONTROL_PSPOLL
);
2536 IWL_CMD(CONTROL_RTS
);
2537 IWL_CMD(CONTROL_CTS
);
2538 IWL_CMD(CONTROL_ACK
);
2539 IWL_CMD(CONTROL_CFEND
);
2540 IWL_CMD(CONTROL_CFENDACK
);
2547 void iwl_clear_traffic_stats(struct iwl_priv
*priv
)
2549 memset(&priv
->tx_stats
, 0, sizeof(struct traffic_stats
));
2550 memset(&priv
->rx_stats
, 0, sizeof(struct traffic_stats
));
2555 * if CONFIG_IWLWIFI_DEBUGFS defined, iwl_update_stats function will
2556 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass.
2557 * Use debugFs to display the rx/rx_statistics
2558 * if CONFIG_IWLWIFI_DEBUGFS not being defined, then no MGMT and CTRL
2559 * information will be recorded, but DATA pkt still will be recorded
2560 * for the reason of iwl_led.c need to control the led blinking based on
2561 * number of tx and rx data.
2564 void iwl_update_stats(struct iwl_priv
*priv
, bool is_tx
, __le16 fc
, u16 len
)
2566 struct traffic_stats
*stats
;
2569 stats
= &priv
->tx_stats
;
2571 stats
= &priv
->rx_stats
;
2573 if (ieee80211_is_mgmt(fc
)) {
2574 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
2575 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ
):
2576 stats
->mgmt
[MANAGEMENT_ASSOC_REQ
]++;
2578 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP
):
2579 stats
->mgmt
[MANAGEMENT_ASSOC_RESP
]++;
2581 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ
):
2582 stats
->mgmt
[MANAGEMENT_REASSOC_REQ
]++;
2584 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP
):
2585 stats
->mgmt
[MANAGEMENT_REASSOC_RESP
]++;
2587 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ
):
2588 stats
->mgmt
[MANAGEMENT_PROBE_REQ
]++;
2590 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP
):
2591 stats
->mgmt
[MANAGEMENT_PROBE_RESP
]++;
2593 case cpu_to_le16(IEEE80211_STYPE_BEACON
):
2594 stats
->mgmt
[MANAGEMENT_BEACON
]++;
2596 case cpu_to_le16(IEEE80211_STYPE_ATIM
):
2597 stats
->mgmt
[MANAGEMENT_ATIM
]++;
2599 case cpu_to_le16(IEEE80211_STYPE_DISASSOC
):
2600 stats
->mgmt
[MANAGEMENT_DISASSOC
]++;
2602 case cpu_to_le16(IEEE80211_STYPE_AUTH
):
2603 stats
->mgmt
[MANAGEMENT_AUTH
]++;
2605 case cpu_to_le16(IEEE80211_STYPE_DEAUTH
):
2606 stats
->mgmt
[MANAGEMENT_DEAUTH
]++;
2608 case cpu_to_le16(IEEE80211_STYPE_ACTION
):
2609 stats
->mgmt
[MANAGEMENT_ACTION
]++;
2612 } else if (ieee80211_is_ctl(fc
)) {
2613 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
2614 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ
):
2615 stats
->ctrl
[CONTROL_BACK_REQ
]++;
2617 case cpu_to_le16(IEEE80211_STYPE_BACK
):
2618 stats
->ctrl
[CONTROL_BACK
]++;
2620 case cpu_to_le16(IEEE80211_STYPE_PSPOLL
):
2621 stats
->ctrl
[CONTROL_PSPOLL
]++;
2623 case cpu_to_le16(IEEE80211_STYPE_RTS
):
2624 stats
->ctrl
[CONTROL_RTS
]++;
2626 case cpu_to_le16(IEEE80211_STYPE_CTS
):
2627 stats
->ctrl
[CONTROL_CTS
]++;
2629 case cpu_to_le16(IEEE80211_STYPE_ACK
):
2630 stats
->ctrl
[CONTROL_ACK
]++;
2632 case cpu_to_le16(IEEE80211_STYPE_CFEND
):
2633 stats
->ctrl
[CONTROL_CFEND
]++;
2635 case cpu_to_le16(IEEE80211_STYPE_CFENDACK
):
2636 stats
->ctrl
[CONTROL_CFENDACK
]++;
2642 stats
->data_bytes
+= len
;
2644 iwl_leds_background(priv
);
2646 EXPORT_SYMBOL(iwl_update_stats
);
2649 const static char *get_csr_string(int cmd
)
2652 IWL_CMD(CSR_HW_IF_CONFIG_REG
);
2653 IWL_CMD(CSR_INT_COALESCING
);
2655 IWL_CMD(CSR_INT_MASK
);
2656 IWL_CMD(CSR_FH_INT_STATUS
);
2657 IWL_CMD(CSR_GPIO_IN
);
2659 IWL_CMD(CSR_GP_CNTRL
);
2660 IWL_CMD(CSR_HW_REV
);
2661 IWL_CMD(CSR_EEPROM_REG
);
2662 IWL_CMD(CSR_EEPROM_GP
);
2663 IWL_CMD(CSR_OTP_GP_REG
);
2664 IWL_CMD(CSR_GIO_REG
);
2665 IWL_CMD(CSR_GP_UCODE_REG
);
2666 IWL_CMD(CSR_GP_DRIVER_REG
);
2667 IWL_CMD(CSR_UCODE_DRV_GP1
);
2668 IWL_CMD(CSR_UCODE_DRV_GP2
);
2669 IWL_CMD(CSR_LED_REG
);
2670 IWL_CMD(CSR_DRAM_INT_TBL_REG
);
2671 IWL_CMD(CSR_GIO_CHICKEN_BITS
);
2672 IWL_CMD(CSR_ANA_PLL_CFG
);
2673 IWL_CMD(CSR_HW_REV_WA_REG
);
2674 IWL_CMD(CSR_DBG_HPET_MEM_REG
);
2681 void iwl_dump_csr(struct iwl_priv
*priv
)
2685 CSR_HW_IF_CONFIG_REG
,
2703 CSR_DRAM_INT_TBL_REG
,
2704 CSR_GIO_CHICKEN_BITS
,
2707 CSR_DBG_HPET_MEM_REG
2709 IWL_ERR(priv
, "CSR values:\n");
2710 IWL_ERR(priv
, "(2nd byte of CSR_INT_COALESCING is "
2711 "CSR_INT_PERIODIC_REG)\n");
2712 for (i
= 0; i
< ARRAY_SIZE(csr_tbl
); i
++) {
2713 IWL_ERR(priv
, " %25s: 0X%08x\n",
2714 get_csr_string(csr_tbl
[i
]),
2715 iwl_read32(priv
, csr_tbl
[i
]));
2718 EXPORT_SYMBOL(iwl_dump_csr
);
2720 const static char *get_fh_string(int cmd
)
2723 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG
);
2724 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG
);
2725 IWL_CMD(FH_RSCSR_CHNL0_WPTR
);
2726 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG
);
2727 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG
);
2728 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG
);
2729 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
);
2730 IWL_CMD(FH_TSSR_TX_STATUS_REG
);
2731 IWL_CMD(FH_TSSR_TX_ERROR_REG
);
2738 int iwl_dump_fh(struct iwl_priv
*priv
, char **buf
, bool display
)
2741 #ifdef CONFIG_IWLWIFI_DEBUG
2746 FH_RSCSR_CHNL0_STTS_WPTR_REG
,
2747 FH_RSCSR_CHNL0_RBDCB_BASE_REG
,
2748 FH_RSCSR_CHNL0_WPTR
,
2749 FH_MEM_RCSR_CHNL0_CONFIG_REG
,
2750 FH_MEM_RSSR_SHARED_CTRL_REG
,
2751 FH_MEM_RSSR_RX_STATUS_REG
,
2752 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
,
2753 FH_TSSR_TX_STATUS_REG
,
2754 FH_TSSR_TX_ERROR_REG
2756 #ifdef CONFIG_IWLWIFI_DEBUG
2758 bufsz
= ARRAY_SIZE(fh_tbl
) * 48 + 40;
2759 *buf
= kmalloc(bufsz
, GFP_KERNEL
);
2762 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
2763 "FH register values:\n");
2764 for (i
= 0; i
< ARRAY_SIZE(fh_tbl
); i
++) {
2765 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
2767 get_fh_string(fh_tbl
[i
]),
2768 iwl_read_direct32(priv
, fh_tbl
[i
]));
2773 IWL_ERR(priv
, "FH register values:\n");
2774 for (i
= 0; i
< ARRAY_SIZE(fh_tbl
); i
++) {
2775 IWL_ERR(priv
, " %34s: 0X%08x\n",
2776 get_fh_string(fh_tbl
[i
]),
2777 iwl_read_direct32(priv
, fh_tbl
[i
]));
2781 EXPORT_SYMBOL(iwl_dump_fh
);
2783 static void iwl_force_rf_reset(struct iwl_priv
*priv
)
2785 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
2788 if (!iwl_is_associated(priv
)) {
2789 IWL_DEBUG_SCAN(priv
, "force reset rejected: not associated\n");
2793 * There is no easy and better way to force reset the radio,
2794 * the only known method is switching channel which will force to
2795 * reset and tune the radio.
2796 * Use internal short scan (single channel) operation to should
2797 * achieve this objective.
2798 * Driver should reset the radio when number of consecutive missed
2799 * beacon, or any other uCode error condition detected.
2801 IWL_DEBUG_INFO(priv
, "perform radio reset.\n");
2802 iwl_internal_short_hw_scan(priv
);
2807 int iwl_force_reset(struct iwl_priv
*priv
, int mode
)
2809 struct iwl_force_reset
*force_reset
;
2811 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
2814 if (mode
>= IWL_MAX_FORCE_RESET
) {
2815 IWL_DEBUG_INFO(priv
, "invalid reset request.\n");
2818 force_reset
= &priv
->force_reset
[mode
];
2819 force_reset
->reset_request_count
++;
2820 if (force_reset
->last_force_reset_jiffies
&&
2821 time_after(force_reset
->last_force_reset_jiffies
+
2822 force_reset
->reset_duration
, jiffies
)) {
2823 IWL_DEBUG_INFO(priv
, "force reset rejected\n");
2824 force_reset
->reset_reject_count
++;
2827 force_reset
->reset_success_count
++;
2828 force_reset
->last_force_reset_jiffies
= jiffies
;
2829 IWL_DEBUG_INFO(priv
, "perform force reset (%d)\n", mode
);
2832 iwl_force_rf_reset(priv
);
2835 IWL_ERR(priv
, "On demand firmware reload\n");
2836 /* Set the FW error flag -- cleared on iwl_down */
2837 set_bit(STATUS_FW_ERROR
, &priv
->status
);
2838 wake_up_interruptible(&priv
->wait_command_queue
);
2840 * Keep the restart process from trying to send host
2841 * commands by clearing the INIT status bit
2843 clear_bit(STATUS_READY
, &priv
->status
);
2844 queue_work(priv
->workqueue
, &priv
->restart
);
2849 EXPORT_SYMBOL(iwl_force_reset
);
2852 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
2854 * During normal condition (no queue is stuck), the timer is continually set to
2855 * execute every monitor_recover_period milliseconds after the last timer
2856 * expired. When the queue read_ptr is at the same place, the timer is
2857 * shorten to 100mSecs. This is
2858 * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
2859 * 2) to detect the stuck queues quicker before the station and AP can
2860 * disassociate each other.
2862 * This function monitors all the tx queues and recover from it if any
2863 * of the queues are stuck.
2864 * 1. It first check the cmd queue for stuck conditions. If it is stuck,
2865 * it will recover by resetting the firmware and return.
2866 * 2. Then, it checks for station association. If it associates it will check
2867 * other queues. If any queue is stuck, it will recover by resetting
2869 * Note: It the number of times the queue read_ptr to be at the same place to
2870 * be MAX_REPEAT+1 in order to consider to be stuck.
2873 * The maximum number of times the read pointer of the tx queue at the
2874 * same place without considering to be stuck.
2876 #define MAX_REPEAT (2)
2877 static int iwl_check_stuck_queue(struct iwl_priv
*priv
, int cnt
)
2879 struct iwl_tx_queue
*txq
;
2880 struct iwl_queue
*q
;
2882 txq
= &priv
->txq
[cnt
];
2884 /* queue is empty, skip */
2885 if (q
->read_ptr
!= q
->write_ptr
) {
2886 if (q
->read_ptr
== q
->last_read_ptr
) {
2887 /* a queue has not been read from last time */
2888 if (q
->repeat_same_read_ptr
> MAX_REPEAT
) {
2890 "queue %d stuck %d time. Fw reload.\n",
2891 q
->id
, q
->repeat_same_read_ptr
);
2892 q
->repeat_same_read_ptr
= 0;
2893 iwl_force_reset(priv
, IWL_FW_RESET
);
2895 q
->repeat_same_read_ptr
++;
2896 IWL_DEBUG_RADIO(priv
,
2897 "queue %d, not read %d time\n",
2899 q
->repeat_same_read_ptr
);
2900 mod_timer(&priv
->monitor_recover
, jiffies
+
2901 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS
));
2905 q
->last_read_ptr
= q
->read_ptr
;
2906 q
->repeat_same_read_ptr
= 0;
2912 void iwl_bg_monitor_recover(unsigned long data
)
2914 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
2917 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
2920 /* monitor and check for stuck cmd queue */
2921 if (iwl_check_stuck_queue(priv
, IWL_CMD_QUEUE_NUM
))
2924 /* monitor and check for other stuck queues */
2925 if (iwl_is_associated(priv
)) {
2926 for (cnt
= 0; cnt
< priv
->hw_params
.max_txq_num
; cnt
++) {
2927 /* skip as we already checked the command queue */
2928 if (cnt
== IWL_CMD_QUEUE_NUM
)
2930 if (iwl_check_stuck_queue(priv
, cnt
))
2935 * Reschedule the timer to occur in
2936 * priv->cfg->monitor_recover_period
2938 mod_timer(&priv
->monitor_recover
,
2939 jiffies
+ msecs_to_jiffies(priv
->cfg
->monitor_recover_period
));
2941 EXPORT_SYMBOL(iwl_bg_monitor_recover
);
2945 int iwl_pci_suspend(struct pci_dev
*pdev
, pm_message_t state
)
2947 struct iwl_priv
*priv
= pci_get_drvdata(pdev
);
2950 * This function is called when system goes into suspend state
2951 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
2952 * first but since iwl_mac_stop() has no knowledge of who the caller is,
2953 * it will not call apm_ops.stop() to stop the DMA operation.
2954 * Calling apm_ops.stop here to make sure we stop the DMA.
2956 priv
->cfg
->ops
->lib
->apm_ops
.stop(priv
);
2958 pci_save_state(pdev
);
2959 pci_disable_device(pdev
);
2960 pci_set_power_state(pdev
, PCI_D3hot
);
2964 EXPORT_SYMBOL(iwl_pci_suspend
);
2966 int iwl_pci_resume(struct pci_dev
*pdev
)
2968 struct iwl_priv
*priv
= pci_get_drvdata(pdev
);
2971 pci_set_power_state(pdev
, PCI_D0
);
2972 ret
= pci_enable_device(pdev
);
2975 pci_restore_state(pdev
);
2976 iwl_enable_interrupts(priv
);
2980 EXPORT_SYMBOL(iwl_pci_resume
);
2982 #endif /* CONFIG_PM */