iwlwifi: Generic approach to measure temperature
[deliverable/linux.git] / drivers / net / wireless / iwlwifi / iwl-core.c
CommitLineData
df48c323 1/******************************************************************************
df48c323
TW
2 *
3 * GPL LICENSE SUMMARY
4 *
1f447808 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
df48c323
TW
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
759ef89f 25 * Intel Linux Wireless <ilw@linux.intel.com>
df48c323
TW
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
8ccde88a 31#include <linux/etherdevice.h>
d43c36dc 32#include <linux/sched.h>
1d0a082d 33#include <net/mac80211.h>
df48c323 34
6bc913bd 35#include "iwl-eeprom.h"
3e0d4cb1 36#include "iwl-dev.h" /* FIXME: remove */
19335774 37#include "iwl-debug.h"
df48c323 38#include "iwl-core.h"
b661c819 39#include "iwl-io.h"
5da4b55f 40#include "iwl-power.h"
83dde8c9 41#include "iwl-sta.h"
ef850d7c 42#include "iwl-helpers.h"
df48c323 43
1d0a082d 44
df48c323
TW
45MODULE_DESCRIPTION("iwl core");
46MODULE_VERSION(IWLWIFI_VERSION);
a7b75207 47MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
712b6cf5 48MODULE_LICENSE("GPL");
df48c323 49
06702a73
WYG
50/*
51 * set bt_coex_active to true, uCode will do kill/defer
52 * every time the priority line is asserted (BT is sending signals on the
53 * priority line in the PCIx).
54 * set bt_coex_active to false, uCode will ignore the BT activity and
55 * perform the normal operation
56 *
57 * User might experience transmit issue on some platform due to WiFi/BT
58 * co-exist problem. The possible behaviors are:
59 * Able to scan and finding all the available AP
60 * Not able to associate with any AP
61 * On those platforms, WiFi communication can be restored by set
62 * "bt_coex_active" module parameter to "false"
63 *
64 * default: bt_coex_active = true (BT_COEX_ENABLE)
65 */
66static bool bt_coex_active = true;
67module_param(bt_coex_active, bool, S_IRUGO);
68MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
69
1933ac4d
WYG
70static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
71 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
72 0, COEX_UNASSOC_IDLE_FLAGS},
73 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
74 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
75 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
76 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
77 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
78 0, COEX_CALIBRATION_FLAGS},
79 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
80 0, COEX_PERIODIC_CALIBRATION_FLAGS},
81 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
82 0, COEX_CONNECTION_ESTAB_FLAGS},
83 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
84 0, COEX_ASSOCIATED_IDLE_FLAGS},
85 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
86 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
87 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
88 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
89 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
90 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
91 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
92 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
93 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
94 0, COEX_STAND_ALONE_DEBUG_FLAGS},
95 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
96 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
97 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
98 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
99};
100
c7de35cd
RR
101#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
102 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
103 IWL_RATE_SISO_##s##M_PLCP, \
104 IWL_RATE_MIMO2_##s##M_PLCP,\
105 IWL_RATE_MIMO3_##s##M_PLCP,\
106 IWL_RATE_##r##M_IEEE, \
107 IWL_RATE_##ip##M_INDEX, \
108 IWL_RATE_##in##M_INDEX, \
109 IWL_RATE_##rp##M_INDEX, \
110 IWL_RATE_##rn##M_INDEX, \
111 IWL_RATE_##pp##M_INDEX, \
112 IWL_RATE_##np##M_INDEX }
113
a562a9dd
RC
114u32 iwl_debug_level;
115EXPORT_SYMBOL(iwl_debug_level);
116
c7de35cd
RR
117/*
118 * Parameter order:
119 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
120 *
121 * If there isn't a valid next or previous rate then INV is used which
122 * maps to IWL_RATE_INVALID
123 *
124 */
1826dcc0 125const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
c7de35cd
RR
126 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
127 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
128 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
129 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
130 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
131 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
132 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
133 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
134 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
135 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
136 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
137 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
138 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
139 /* FIXME:RS: ^^ should be INV (legacy) */
140};
1826dcc0 141EXPORT_SYMBOL(iwl_rates);
c7de35cd 142
e7d326ac
TW
143int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
144{
145 int idx = 0;
146
147 /* HT rate format */
148 if (rate_n_flags & RATE_MCS_HT_MSK) {
149 idx = (rate_n_flags & 0xff);
150
60d32215
DH
151 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
152 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
153 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
e7d326ac
TW
154 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
155
156 idx += IWL_FIRST_OFDM_RATE;
157 /* skip 9M not supported in ht*/
158 if (idx >= IWL_RATE_9M_INDEX)
159 idx += 1;
160 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
161 return idx;
162
163 /* legacy rate format, search for match in table */
164 } else {
165 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
166 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
167 return idx;
168 }
169
170 return -1;
171}
172EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
173
76eff18b
TW
174u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
175{
176 int i;
177 u8 ind = ant;
178 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
179 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
180 if (priv->hw_params.valid_tx_ant & BIT(ind))
181 return ind;
182 }
183 return ant;
184}
47ff65c4 185EXPORT_SYMBOL(iwl_toggle_tx_ant);
57bd1bea
TW
186
187const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
188EXPORT_SYMBOL(iwl_bcast_addr);
189
190
1d0a082d
AK
191/* This function both allocates and initializes hw and priv. */
192struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
193 struct ieee80211_ops *hw_ops)
194{
195 struct iwl_priv *priv;
196
197 /* mac80211 allocates memory for this device instance, including
198 * space for this driver's private structure */
199 struct ieee80211_hw *hw =
200 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
201 if (hw == NULL) {
a3139c59
SO
202 printk(KERN_ERR "%s: Can not allocate network device\n",
203 cfg->name);
1d0a082d
AK
204 goto out;
205 }
206
207 priv = hw->priv;
208 priv->hw = hw;
209
210out:
211 return hw;
212}
213EXPORT_SYMBOL(iwl_alloc_all);
214
b661c819
TW
215void iwl_hw_detect(struct iwl_priv *priv)
216{
217 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
218 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
219 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
220}
221EXPORT_SYMBOL(iwl_hw_detect);
222
14d2aac5
AK
223/*
224 * QoS support
225*/
226void iwl_activate_qos(struct iwl_priv *priv, u8 force)
227{
228 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
229 return;
230
231 priv->qos_data.def_qos_parm.qos_flags = 0;
232
233 if (priv->qos_data.qos_cap.q_AP.queue_request &&
234 !priv->qos_data.qos_cap.q_AP.txop_request)
235 priv->qos_data.def_qos_parm.qos_flags |=
236 QOS_PARAM_FLG_TXOP_TYPE_MSK;
237 if (priv->qos_data.qos_active)
238 priv->qos_data.def_qos_parm.qos_flags |=
239 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
240
241 if (priv->current_ht_config.is_ht)
242 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
243
244 if (force || iwl_is_associated(priv)) {
245 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
246 priv->qos_data.qos_active,
247 priv->qos_data.def_qos_parm.qos_flags);
248
249 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
250 sizeof(struct iwl_qosparam_cmd),
251 &priv->qos_data.def_qos_parm, NULL);
252 }
253}
254EXPORT_SYMBOL(iwl_activate_qos);
255
f2c95b04
WYG
256/*
257 * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
258 * (802.11b) (802.11a/g)
259 * AC_BK 15 1023 7 0 0
260 * AC_BE 15 1023 3 0 0
261 * AC_VI 7 15 2 6.016ms 3.008ms
262 * AC_VO 3 7 2 3.264ms 1.504ms
263 */
c7de35cd 264void iwl_reset_qos(struct iwl_priv *priv)
bf85ea4f
AK
265{
266 u16 cw_min = 15;
267 u16 cw_max = 1023;
268 u8 aifs = 2;
30dab79e 269 bool is_legacy = false;
bf85ea4f
AK
270 unsigned long flags;
271 int i;
272
273 spin_lock_irqsave(&priv->lock, flags);
30dab79e
WT
274 /* QoS always active in AP and ADHOC mode
275 * In STA mode wait for association
276 */
277 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
278 priv->iw_mode == NL80211_IFTYPE_AP)
279 priv->qos_data.qos_active = 1;
280 else
281 priv->qos_data.qos_active = 0;
bf85ea4f 282
30dab79e
WT
283 /* check for legacy mode */
284 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
285 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
286 (priv->iw_mode == NL80211_IFTYPE_STATION &&
287 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
bf85ea4f
AK
288 cw_min = 31;
289 is_legacy = 1;
290 }
291
292 if (priv->qos_data.qos_active)
293 aifs = 3;
294
f2c95b04 295 /* AC_BE */
bf85ea4f
AK
296 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
297 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
298 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
299 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
300 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
301
302 if (priv->qos_data.qos_active) {
f2c95b04 303 /* AC_BK */
bf85ea4f
AK
304 i = 1;
305 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
306 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
307 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
308 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
309 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
310
f2c95b04 311 /* AC_VI */
bf85ea4f
AK
312 i = 2;
313 priv->qos_data.def_qos_parm.ac[i].cw_min =
314 cpu_to_le16((cw_min + 1) / 2 - 1);
315 priv->qos_data.def_qos_parm.ac[i].cw_max =
f2c95b04 316 cpu_to_le16(cw_min);
bf85ea4f
AK
317 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
318 if (is_legacy)
319 priv->qos_data.def_qos_parm.ac[i].edca_txop =
320 cpu_to_le16(6016);
321 else
322 priv->qos_data.def_qos_parm.ac[i].edca_txop =
323 cpu_to_le16(3008);
324 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
325
f2c95b04 326 /* AC_VO */
bf85ea4f
AK
327 i = 3;
328 priv->qos_data.def_qos_parm.ac[i].cw_min =
329 cpu_to_le16((cw_min + 1) / 4 - 1);
330 priv->qos_data.def_qos_parm.ac[i].cw_max =
f2c95b04 331 cpu_to_le16((cw_min + 1) / 2 - 1);
bf85ea4f
AK
332 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
333 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
334 if (is_legacy)
335 priv->qos_data.def_qos_parm.ac[i].edca_txop =
336 cpu_to_le16(3264);
337 else
338 priv->qos_data.def_qos_parm.ac[i].edca_txop =
339 cpu_to_le16(1504);
340 } else {
341 for (i = 1; i < 4; i++) {
342 priv->qos_data.def_qos_parm.ac[i].cw_min =
343 cpu_to_le16(cw_min);
344 priv->qos_data.def_qos_parm.ac[i].cw_max =
345 cpu_to_le16(cw_max);
346 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
347 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
348 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
349 }
350 }
e1623446 351 IWL_DEBUG_QOS(priv, "set QoS to default \n");
bf85ea4f
AK
352
353 spin_unlock_irqrestore(&priv->lock, flags);
354}
c7de35cd
RR
355EXPORT_SYMBOL(iwl_reset_qos);
356
d9fe60de
JB
357#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
358#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
c7de35cd 359static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
d9fe60de 360 struct ieee80211_sta_ht_cap *ht_info,
c7de35cd
RR
361 enum ieee80211_band band)
362{
39130df3
RR
363 u16 max_bit_rate = 0;
364 u8 rx_chains_num = priv->hw_params.rx_chains_num;
365 u8 tx_chains_num = priv->hw_params.tx_chains_num;
366
c7de35cd 367 ht_info->cap = 0;
d9fe60de 368 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
c7de35cd 369
d9fe60de 370 ht_info->ht_supported = true;
c7de35cd 371
b261793d
DH
372 if (priv->cfg->ht_greenfield_support)
373 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
d9fe60de 374 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
39130df3 375 max_bit_rate = MAX_BIT_RATE_20_MHZ;
7aafef1c 376 if (priv->hw_params.ht40_channel & BIT(band)) {
d9fe60de
JB
377 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
378 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
379 ht_info->mcs.rx_mask[4] = 0x01;
39130df3 380 max_bit_rate = MAX_BIT_RATE_40_MHZ;
c7de35cd 381 }
c7de35cd
RR
382
383 if (priv->cfg->mod_params->amsdu_size_8K)
d9fe60de 384 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
c7de35cd
RR
385
386 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
387 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
388
d9fe60de 389 ht_info->mcs.rx_mask[0] = 0xFF;
39130df3 390 if (rx_chains_num >= 2)
d9fe60de 391 ht_info->mcs.rx_mask[1] = 0xFF;
39130df3 392 if (rx_chains_num >= 3)
d9fe60de 393 ht_info->mcs.rx_mask[2] = 0xFF;
39130df3
RR
394
395 /* Highest supported Rx data rate */
396 max_bit_rate *= rx_chains_num;
d9fe60de
JB
397 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
398 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
39130df3
RR
399
400 /* Tx MCS capabilities */
d9fe60de 401 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
39130df3 402 if (tx_chains_num != rx_chains_num) {
d9fe60de
JB
403 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
404 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
405 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
39130df3 406 }
c7de35cd 407}
c7de35cd 408
c7de35cd
RR
409/**
410 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
411 */
534166de 412int iwlcore_init_geos(struct iwl_priv *priv)
c7de35cd
RR
413{
414 struct iwl_channel_info *ch;
415 struct ieee80211_supported_band *sband;
416 struct ieee80211_channel *channels;
417 struct ieee80211_channel *geo_ch;
418 struct ieee80211_rate *rates;
419 int i = 0;
420
421 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
422 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
e1623446 423 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
c7de35cd
RR
424 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
425 return 0;
426 }
427
428 channels = kzalloc(sizeof(struct ieee80211_channel) *
429 priv->channel_count, GFP_KERNEL);
430 if (!channels)
431 return -ENOMEM;
432
5027309b 433 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
c7de35cd
RR
434 GFP_KERNEL);
435 if (!rates) {
436 kfree(channels);
437 return -ENOMEM;
438 }
439
440 /* 5.2GHz channels start after the 2.4GHz channels */
441 sband = &priv->bands[IEEE80211_BAND_5GHZ];
442 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
443 /* just OFDM */
444 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
5027309b 445 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
c7de35cd 446
49779293 447 if (priv->cfg->sku & IWL_SKU_N)
d9fe60de 448 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
49779293 449 IEEE80211_BAND_5GHZ);
c7de35cd
RR
450
451 sband = &priv->bands[IEEE80211_BAND_2GHZ];
452 sband->channels = channels;
453 /* OFDM & CCK */
454 sband->bitrates = rates;
5027309b 455 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
c7de35cd 456
49779293 457 if (priv->cfg->sku & IWL_SKU_N)
d9fe60de 458 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
49779293 459 IEEE80211_BAND_2GHZ);
c7de35cd
RR
460
461 priv->ieee_channels = channels;
462 priv->ieee_rates = rates;
463
c7de35cd
RR
464 for (i = 0; i < priv->channel_count; i++) {
465 ch = &priv->channel_info[i];
466
467 /* FIXME: might be removed if scan is OK */
468 if (!is_channel_valid(ch))
469 continue;
470
471 if (is_channel_a_band(ch))
472 sband = &priv->bands[IEEE80211_BAND_5GHZ];
473 else
474 sband = &priv->bands[IEEE80211_BAND_2GHZ];
475
476 geo_ch = &sband->channels[sband->n_channels++];
477
478 geo_ch->center_freq =
479 ieee80211_channel_to_frequency(ch->channel);
480 geo_ch->max_power = ch->max_power_avg;
481 geo_ch->max_antenna_gain = 0xff;
482 geo_ch->hw_value = ch->channel;
483
484 if (is_channel_valid(ch)) {
485 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
486 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
487
488 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
489 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
490
491 if (ch->flags & EEPROM_CHANNEL_RADAR)
492 geo_ch->flags |= IEEE80211_CHAN_RADAR;
493
7aafef1c 494 geo_ch->flags |= ch->ht40_extension_channel;
4d38c2e8 495
dc1b0973
WYG
496 if (ch->max_power_avg > priv->tx_power_device_lmt)
497 priv->tx_power_device_lmt = ch->max_power_avg;
c7de35cd
RR
498 } else {
499 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
500 }
501
e1623446 502 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
c7de35cd
RR
503 ch->channel, geo_ch->center_freq,
504 is_channel_a_band(ch) ? "5.2" : "2.4",
505 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
506 "restricted" : "valid",
507 geo_ch->flags);
508 }
509
510 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
511 priv->cfg->sku & IWL_SKU_A) {
978785a3
TW
512 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
513 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
a3139c59
SO
514 priv->pci_dev->device,
515 priv->pci_dev->subsystem_device);
c7de35cd
RR
516 priv->cfg->sku &= ~IWL_SKU_A;
517 }
518
978785a3 519 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
a3139c59
SO
520 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
521 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
c7de35cd
RR
522
523 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
524
525 return 0;
526}
534166de 527EXPORT_SYMBOL(iwlcore_init_geos);
c7de35cd
RR
528
529/*
530 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
531 */
534166de 532void iwlcore_free_geos(struct iwl_priv *priv)
c7de35cd
RR
533{
534 kfree(priv->ieee_channels);
535 kfree(priv->ieee_rates);
536 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
537}
534166de 538EXPORT_SYMBOL(iwlcore_free_geos);
c7de35cd 539
37dc70fe
AK
540/*
541 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
542 * function.
543 */
544void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
545 __le32 *tx_flags)
546{
547 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
548 *tx_flags |= TX_CMD_FLG_RTS_MSK;
549 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
550 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
551 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
552 *tx_flags |= TX_CMD_FLG_CTS_MSK;
553 }
554}
555EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
556
28a6b07a 557static bool is_single_rx_stream(struct iwl_priv *priv)
c7de35cd 558{
ba37a3d0 559 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
02bb1bea 560 priv->current_ht_config.single_chain_sufficient;
c7de35cd 561}
963f5517 562
47c5196e
TW
563static u8 iwl_is_channel_extension(struct iwl_priv *priv,
564 enum ieee80211_band band,
565 u16 channel, u8 extension_chan_offset)
566{
567 const struct iwl_channel_info *ch_info;
568
569 ch_info = iwl_get_channel_info(priv, band, channel);
570 if (!is_channel_valid(ch_info))
571 return 0;
572
d9fe60de 573 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
7aafef1c 574 return !(ch_info->ht40_extension_channel &
689da1b3 575 IEEE80211_CHAN_NO_HT40PLUS);
d9fe60de 576 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
7aafef1c 577 return !(ch_info->ht40_extension_channel &
689da1b3 578 IEEE80211_CHAN_NO_HT40MINUS);
47c5196e
TW
579
580 return 0;
581}
582
7aafef1c 583u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
d9fe60de 584 struct ieee80211_sta_ht_cap *sta_ht_inf)
47c5196e 585{
fad95bf5 586 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
47c5196e 587
fad95bf5 588 if (!ht_conf->is_ht || !ht_conf->is_40mhz)
47c5196e
TW
589 return 0;
590
a2b0f02e
WYG
591 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
592 * the bit will not set if it is pure 40MHz case
593 */
47c5196e 594 if (sta_ht_inf) {
a2b0f02e 595 if (!sta_ht_inf->ht_supported)
47c5196e
TW
596 return 0;
597 }
1e4247d4
WYG
598#ifdef CONFIG_IWLWIFI_DEBUG
599 if (priv->disable_ht40)
600 return 0;
601#endif
611d3eb7
WYG
602 return iwl_is_channel_extension(priv, priv->band,
603 le16_to_cpu(priv->staging_rxon.channel),
fad95bf5 604 ht_conf->extension_chan_offset);
47c5196e 605}
7aafef1c 606EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
47c5196e 607
2c2f3b33
TW
608static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
609{
610 u16 new_val = 0;
611 u16 beacon_factor = 0;
612
613 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
614 new_val = beacon_val / beacon_factor;
615
616 if (!new_val)
617 new_val = max_beacon_val;
618
619 return new_val;
620}
621
622void iwl_setup_rxon_timing(struct iwl_priv *priv)
623{
624 u64 tsf;
625 s32 interval_tm, rem;
626 unsigned long flags;
627 struct ieee80211_conf *conf = NULL;
628 u16 beacon_int;
629
630 conf = ieee80211_get_hw_conf(priv->hw);
631
632 spin_lock_irqsave(&priv->lock, flags);
633 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
634 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
635
636 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
637 beacon_int = priv->beacon_int;
638 priv->rxon_timing.atim_window = 0;
639 } else {
640 beacon_int = priv->vif->bss_conf.beacon_int;
641
642 /* TODO: we need to get atim_window from upper stack
643 * for now we set to 0 */
644 priv->rxon_timing.atim_window = 0;
645 }
646
647 beacon_int = iwl_adjust_beacon_interval(beacon_int,
648 priv->hw_params.max_beacon_itrvl * 1024);
649 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
650
651 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
652 interval_tm = beacon_int * 1024;
653 rem = do_div(tsf, interval_tm);
654 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
655
656 spin_unlock_irqrestore(&priv->lock, flags);
657 IWL_DEBUG_ASSOC(priv,
658 "beacon interval %d beacon timer %d beacon tim %d\n",
659 le16_to_cpu(priv->rxon_timing.beacon_interval),
660 le32_to_cpu(priv->rxon_timing.beacon_init_val),
661 le16_to_cpu(priv->rxon_timing.atim_window));
662}
663EXPORT_SYMBOL(iwl_setup_rxon_timing);
664
8ccde88a
SO
665void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
666{
667 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
668
669 if (hw_decrypt)
670 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
671 else
672 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
673
674}
675EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
676
677/**
678 * iwl_check_rxon_cmd - validate RXON structure is valid
679 *
680 * NOTE: This is really only useful during development and can eventually
681 * be #ifdef'd out once the driver is stable and folks aren't actively
682 * making changes
683 */
684int iwl_check_rxon_cmd(struct iwl_priv *priv)
685{
686 int error = 0;
687 int counter = 1;
688 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
689
690 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
691 error |= le32_to_cpu(rxon->flags &
692 (RXON_FLG_TGJ_NARROW_BAND_MSK |
693 RXON_FLG_RADAR_DETECT_MSK));
694 if (error)
695 IWL_WARN(priv, "check 24G fields %d | %d\n",
696 counter++, error);
697 } else {
698 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
699 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
700 if (error)
701 IWL_WARN(priv, "check 52 fields %d | %d\n",
702 counter++, error);
703 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
704 if (error)
705 IWL_WARN(priv, "check 52 CCK %d | %d\n",
706 counter++, error);
707 }
708 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
709 if (error)
710 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
711
712 /* make sure basic rates 6Mbps and 1Mbps are supported */
713 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
714 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
715 if (error)
716 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
717
718 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
719 if (error)
720 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
721
722 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
723 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
724 if (error)
725 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
726 counter++, error);
727
728 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
729 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
730 if (error)
731 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
732 counter++, error);
733
734 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
735 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
736 if (error)
737 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
738 counter++, error);
739
740 if (error)
741 IWL_WARN(priv, "Tuning to channel %d\n",
742 le16_to_cpu(rxon->channel));
743
744 if (error) {
745 IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n");
746 return -1;
747 }
748 return 0;
749}
750EXPORT_SYMBOL(iwl_check_rxon_cmd);
751
752/**
753 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
754 * @priv: staging_rxon is compared to active_rxon
755 *
756 * If the RXON structure is changing enough to require a new tune,
757 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
758 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
759 */
760int iwl_full_rxon_required(struct iwl_priv *priv)
761{
762
763 /* These items are only settable from the full RXON command */
764 if (!(iwl_is_associated(priv)) ||
765 compare_ether_addr(priv->staging_rxon.bssid_addr,
766 priv->active_rxon.bssid_addr) ||
767 compare_ether_addr(priv->staging_rxon.node_addr,
768 priv->active_rxon.node_addr) ||
769 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
770 priv->active_rxon.wlap_bssid_addr) ||
771 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
772 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
773 (priv->staging_rxon.air_propagation !=
774 priv->active_rxon.air_propagation) ||
775 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
776 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
777 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
778 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
c2105fa7
DH
779 (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
780 priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
8ccde88a
SO
781 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
782 return 1;
783
784 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
785 * be updated with the RXON_ASSOC command -- however only some
786 * flag transitions are allowed using RXON_ASSOC */
787
788 /* Check if we are not switching bands */
789 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
790 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
791 return 1;
792
793 /* Check if we are switching association toggle */
794 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
795 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
796 return 1;
797
798 return 0;
799}
800EXPORT_SYMBOL(iwl_full_rxon_required);
801
802u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
803{
4a02886b
JB
804 /*
805 * Assign the lowest rate -- should really get this from
806 * the beacon skb from mac80211.
807 */
8ccde88a
SO
808 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
809 return IWL_RATE_1M_PLCP;
810 else
811 return IWL_RATE_6M_PLCP;
812}
813EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
814
fad95bf5 815void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
47c5196e 816{
c1adf9fb 817 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
47c5196e 818
fad95bf5 819 if (!ht_conf->is_ht) {
a2b0f02e 820 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
42eb7c64 821 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
7aafef1c 822 RXON_FLG_HT40_PROT_MSK |
42eb7c64 823 RXON_FLG_HT_PROT_MSK);
47c5196e 824 return;
42eb7c64 825 }
47c5196e 826
a2b0f02e
WYG
827 /* FIXME: if the definition of ht_protection changed, the "translation"
828 * will be needed for rxon->flags
829 */
fad95bf5 830 rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
a2b0f02e
WYG
831
832 /* Set up channel bandwidth:
7aafef1c 833 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
a2b0f02e
WYG
834 /* clear the HT channel mode before set the mode */
835 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
836 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
7aafef1c
WYG
837 if (iwl_is_ht40_tx_allowed(priv, NULL)) {
838 /* pure ht40 */
fad95bf5 839 if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
a2b0f02e 840 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
508b08e7 841 /* Note: control channel is opposite of extension channel */
fad95bf5 842 switch (ht_conf->extension_chan_offset) {
508b08e7
WYG
843 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
844 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
845 break;
846 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
847 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
848 break;
849 }
850 } else {
a2b0f02e 851 /* Note: control channel is opposite of extension channel */
fad95bf5 852 switch (ht_conf->extension_chan_offset) {
a2b0f02e
WYG
853 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
854 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
855 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
856 break;
857 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
858 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
859 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
860 break;
861 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
862 default:
863 /* channel location only valid if in Mixed mode */
864 IWL_ERR(priv, "invalid extension channel offset\n");
865 break;
866 }
867 }
868 } else {
869 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
47c5196e
TW
870 }
871
45823531
AK
872 if (priv->cfg->ops->hcmd->set_rxon_chain)
873 priv->cfg->ops->hcmd->set_rxon_chain(priv);
47c5196e 874
02bb1bea 875 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
ae5eb026 876 "extension channel offset 0x%x\n",
fad95bf5
JB
877 le32_to_cpu(rxon->flags), ht_conf->ht_protection,
878 ht_conf->extension_chan_offset);
47c5196e
TW
879 return;
880}
881EXPORT_SYMBOL(iwl_set_rxon_ht);
882
9e5e6c32
TW
883#define IWL_NUM_RX_CHAINS_MULTIPLE 3
884#define IWL_NUM_RX_CHAINS_SINGLE 2
885#define IWL_NUM_IDLE_CHAINS_DUAL 2
886#define IWL_NUM_IDLE_CHAINS_SINGLE 1
887
2b396a12
JB
888/*
889 * Determine how many receiver/antenna chains to use.
890 *
891 * More provides better reception via diversity. Fewer saves power
892 * at the expense of throughput, but only when not in powersave to
893 * start with.
894 *
c7de35cd
RR
895 * MIMO (dual stream) requires at least 2, but works better with 3.
896 * This does not determine *which* chains to use, just how many.
897 */
28a6b07a 898static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
c7de35cd 899{
c7de35cd 900 /* # of Rx chains to use when expecting MIMO. */
02bb1bea 901 if (is_single_rx_stream(priv))
9e5e6c32 902 return IWL_NUM_RX_CHAINS_SINGLE;
c7de35cd 903 else
9e5e6c32 904 return IWL_NUM_RX_CHAINS_MULTIPLE;
28a6b07a 905}
c7de35cd 906
2b396a12 907/*
3f3e0376
WYG
908 * When we are in power saving mode, unless device support spatial
909 * multiplexing power save, use the active count for rx chain count.
2b396a12 910 */
28a6b07a
TW
911static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
912{
ba37a3d0
JB
913 /* # Rx chains when idling, depending on SMPS mode */
914 switch (priv->current_ht_config.smps) {
915 case IEEE80211_SMPS_STATIC:
916 case IEEE80211_SMPS_DYNAMIC:
917 return IWL_NUM_IDLE_CHAINS_SINGLE;
918 case IEEE80211_SMPS_OFF:
919 return active_cnt;
c15d20c1 920 default:
ba37a3d0
JB
921 WARN(1, "invalid SMPS mode %d",
922 priv->current_ht_config.smps);
923 return active_cnt;
3f3e0376 924 }
c7de35cd
RR
925}
926
04816448
GE
927/* up to 4 chains */
928static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
929{
930 u8 res;
931 res = (chain_bitmap & BIT(0)) >> 0;
932 res += (chain_bitmap & BIT(1)) >> 1;
933 res += (chain_bitmap & BIT(2)) >> 2;
9bddbab3 934 res += (chain_bitmap & BIT(3)) >> 3;
04816448
GE
935 return res;
936}
937
4c4df78f
CR
938/**
939 * iwl_is_monitor_mode - Determine if interface in monitor mode
940 *
941 * priv->iw_mode is set in add_interface, but add_interface is
942 * never called for monitor mode. The only way mac80211 informs us about
943 * monitor mode is through configuring filters (call to configure_filter).
944 */
279b05d4 945bool iwl_is_monitor_mode(struct iwl_priv *priv)
4c4df78f
CR
946{
947 return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
948}
279b05d4 949EXPORT_SYMBOL(iwl_is_monitor_mode);
4c4df78f 950
c7de35cd
RR
951/**
952 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
953 *
954 * Selects how many and which Rx receivers/antennas/chains to use.
955 * This should not be used for scan command ... it puts data in wrong place.
956 */
957void iwl_set_rxon_chain(struct iwl_priv *priv)
958{
28a6b07a
TW
959 bool is_single = is_single_rx_stream(priv);
960 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
04816448
GE
961 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
962 u32 active_chains;
28a6b07a 963 u16 rx_chain;
c7de35cd
RR
964
965 /* Tell uCode which antennas are actually connected.
966 * Before first association, we assume all antennas are connected.
967 * Just after first association, iwl_chain_noise_calibration()
968 * checks which antennas actually *are* connected. */
04816448
GE
969 if (priv->chain_noise_data.active_chains)
970 active_chains = priv->chain_noise_data.active_chains;
971 else
972 active_chains = priv->hw_params.valid_rx_ant;
973
974 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
c7de35cd
RR
975
976 /* How many receivers should we use? */
28a6b07a
TW
977 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
978 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
979
28a6b07a 980
04816448
GE
981 /* correct rx chain count according hw settings
982 * and chain noise calibration
983 */
984 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
985 if (valid_rx_cnt < active_rx_cnt)
986 active_rx_cnt = valid_rx_cnt;
987
988 if (valid_rx_cnt < idle_rx_cnt)
989 idle_rx_cnt = valid_rx_cnt;
28a6b07a
TW
990
991 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
992 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
993
7b841727
RF
994 /* copied from 'iwl_bg_request_scan()' */
995 /* Force use of chains B and C (0x6) for Rx for 4965
996 * Avoid A (0x1) because of its off-channel reception on A-band.
997 * MIMO is not used here, but value is required */
998 if (iwl_is_monitor_mode(priv) &&
999 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
1000 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
fff7a434
WYG
1001 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
1002 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
1003 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1004 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
7b841727
RF
1005 }
1006
28a6b07a
TW
1007 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
1008
9e5e6c32 1009 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
c7de35cd
RR
1010 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1011 else
1012 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1013
e1623446 1014 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
28a6b07a
TW
1015 priv->staging_rxon.rx_chain,
1016 active_rx_cnt, idle_rx_cnt);
1017
1018 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1019 active_rx_cnt < idle_rx_cnt);
c7de35cd
RR
1020}
1021EXPORT_SYMBOL(iwl_set_rxon_chain);
bf85ea4f
AK
1022
1023/**
17e72782 1024 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
bf85ea4f
AK
1025 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
1026 * @channel: Any channel valid for the requested phymode
1027
1028 * In addition to setting the staging RXON, priv->phymode is also set.
1029 *
1030 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
1031 * in the staging RXON flag structure based on the phymode
1032 */
17e72782 1033int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
bf85ea4f 1034{
17e72782
TW
1035 enum ieee80211_band band = ch->band;
1036 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
1037
8622e705 1038 if (!iwl_get_channel_info(priv, band, channel)) {
e1623446 1039 IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
bf85ea4f
AK
1040 channel, band);
1041 return -EINVAL;
1042 }
1043
1044 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
1045 (priv->band == band))
1046 return 0;
1047
1048 priv->staging_rxon.channel = cpu_to_le16(channel);
1049 if (band == IEEE80211_BAND_5GHZ)
1050 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
1051 else
1052 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1053
1054 priv->band = band;
1055
e1623446 1056 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
bf85ea4f
AK
1057
1058 return 0;
1059}
c7de35cd 1060EXPORT_SYMBOL(iwl_set_rxon_channel);
bf85ea4f 1061
8ccde88a
SO
1062void iwl_set_flags_for_band(struct iwl_priv *priv,
1063 enum ieee80211_band band)
1064{
1065 if (band == IEEE80211_BAND_5GHZ) {
1066 priv->staging_rxon.flags &=
1067 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1068 | RXON_FLG_CCK_MSK);
1069 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1070 } else {
1071 /* Copied from iwl_post_associate() */
1072 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
1073 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1074 else
1075 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1076
1077 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
1078 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1079
1080 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1081 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1082 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
1083 }
1084}
8ccde88a
SO
1085
1086/*
1087 * initialize rxon structure with default values from eeprom
1088 */
1089void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1090{
1091 const struct iwl_channel_info *ch_info;
1092
1093 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
1094
1095 switch (mode) {
1096 case NL80211_IFTYPE_AP:
1097 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
1098 break;
1099
1100 case NL80211_IFTYPE_STATION:
1101 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
1102 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
1103 break;
1104
1105 case NL80211_IFTYPE_ADHOC:
1106 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1107 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1108 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
1109 RXON_FILTER_ACCEPT_GRP_MSK;
1110 break;
1111
8ccde88a
SO
1112 default:
1113 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
1114 break;
1115 }
1116
1117#if 0
1118 /* TODO: Figure out when short_preamble would be set and cache from
1119 * that */
1120 if (!hw_to_local(priv->hw)->short_preamble)
1121 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1122 else
1123 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1124#endif
1125
1126 ch_info = iwl_get_channel_info(priv, priv->band,
1127 le16_to_cpu(priv->active_rxon.channel));
1128
1129 if (!ch_info)
1130 ch_info = &priv->channel_info[0];
1131
8ccde88a
SO
1132 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1133 priv->band = ch_info->band;
1134
1135 iwl_set_flags_for_band(priv, priv->band);
1136
1137 priv->staging_rxon.ofdm_basic_rates =
1138 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1139 priv->staging_rxon.cck_basic_rates =
1140 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1141
a2b0f02e
WYG
1142 /* clear both MIX and PURE40 mode flag */
1143 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
1144 RXON_FLG_CHANNEL_MODE_PURE_40);
8ccde88a
SO
1145 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1146 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1147 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1148 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
11397a65 1149 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
8ccde88a
SO
1150}
1151EXPORT_SYMBOL(iwl_connection_init_rx_config);
1152
782571f4 1153static void iwl_set_rate(struct iwl_priv *priv)
8ccde88a
SO
1154{
1155 const struct ieee80211_supported_band *hw = NULL;
1156 struct ieee80211_rate *rate;
1157 int i;
1158
1159 hw = iwl_get_hw_mode(priv, priv->band);
1160 if (!hw) {
1161 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
1162 return;
1163 }
1164
1165 priv->active_rate = 0;
8ccde88a
SO
1166
1167 for (i = 0; i < hw->n_bitrates; i++) {
1168 rate = &(hw->bitrates[i]);
5027309b 1169 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
8ccde88a
SO
1170 priv->active_rate |= (1 << rate->hw_value);
1171 }
1172
4a02886b 1173 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
8ccde88a 1174
4a02886b
JB
1175 priv->staging_rxon.cck_basic_rates =
1176 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1177
1178 priv->staging_rxon.ofdm_basic_rates =
1179 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
8ccde88a 1180}
8ccde88a
SO
1181
1182void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1183{
2f301227 1184 struct iwl_rx_packet *pkt = rxb_addr(rxb);
8ccde88a
SO
1185 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1186 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
4a56e965 1187
0924e519
WYG
1188 if (priv->switch_rxon.switch_in_progress) {
1189 if (!le32_to_cpu(csa->status) &&
1190 (csa->channel == priv->switch_rxon.channel)) {
1191 rxon->channel = csa->channel;
1192 priv->staging_rxon.channel = csa->channel;
1193 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1194 le16_to_cpu(csa->channel));
1195 } else
1196 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1197 le16_to_cpu(csa->channel));
1198
1199 priv->switch_rxon.switch_in_progress = false;
1200 }
8ccde88a
SO
1201}
1202EXPORT_SYMBOL(iwl_rx_csa);
1203
1204#ifdef CONFIG_IWLWIFI_DEBUG
a643565e 1205void iwl_print_rx_config_cmd(struct iwl_priv *priv)
8ccde88a
SO
1206{
1207 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1208
e1623446 1209 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
3d816c77 1210 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
e1623446
TW
1211 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
1212 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
1213 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
8ccde88a 1214 le32_to_cpu(rxon->filter_flags));
e1623446
TW
1215 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
1216 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
8ccde88a 1217 rxon->ofdm_basic_rates);
e1623446
TW
1218 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1219 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
1220 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1221 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
8ccde88a 1222}
a643565e 1223EXPORT_SYMBOL(iwl_print_rx_config_cmd);
6686d17e 1224#endif
8ccde88a
SO
1225/**
1226 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1227 */
1228void iwl_irq_handle_error(struct iwl_priv *priv)
1229{
1230 /* Set the FW error flag -- cleared on iwl_down */
1231 set_bit(STATUS_FW_ERROR, &priv->status);
1232
1233 /* Cancel currently queued command. */
1234 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1235
3a3ff72c 1236 priv->cfg->ops->lib->dump_nic_error_log(priv);
696bdee3
WYG
1237 if (priv->cfg->ops->lib->dump_csr)
1238 priv->cfg->ops->lib->dump_csr(priv);
1b3eb823
WYG
1239 if (priv->cfg->ops->lib->dump_fh)
1240 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
b03d7d0f 1241 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
8ccde88a 1242#ifdef CONFIG_IWLWIFI_DEBUG
c341ddb2 1243 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
8ccde88a 1244 iwl_print_rx_config_cmd(priv);
8ccde88a
SO
1245#endif
1246
1247 wake_up_interruptible(&priv->wait_command_queue);
1248
1249 /* Keep the restart process from trying to send host
1250 * commands by clearing the INIT status bit */
1251 clear_bit(STATUS_READY, &priv->status);
1252
1253 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
e1623446 1254 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
8ccde88a
SO
1255 "Restarting adapter due to uCode error.\n");
1256
8ccde88a
SO
1257 if (priv->cfg->mod_params->restart_fw)
1258 queue_work(priv->workqueue, &priv->restart);
1259 }
1260}
1261EXPORT_SYMBOL(iwl_irq_handle_error);
1262
f8e200de 1263static int iwl_apm_stop_master(struct iwl_priv *priv)
d68b603c 1264{
5220af0c 1265 int ret = 0;
d68b603c 1266
5220af0c 1267 /* stop device's busmaster DMA activity */
d68b603c
AK
1268 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1269
5220af0c 1270 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
d68b603c 1271 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
5220af0c
BC
1272 if (ret)
1273 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
d68b603c 1274
d68b603c
AK
1275 IWL_DEBUG_INFO(priv, "stop master\n");
1276
5220af0c 1277 return ret;
d68b603c 1278}
d68b603c
AK
1279
1280void iwl_apm_stop(struct iwl_priv *priv)
1281{
fadb3582
BC
1282 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1283
5220af0c 1284 /* Stop device's DMA activity */
d68b603c
AK
1285 iwl_apm_stop_master(priv);
1286
5220af0c 1287 /* Reset the entire device */
d68b603c
AK
1288 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1289
1290 udelay(10);
5220af0c
BC
1291
1292 /*
1293 * Clear "initialization complete" bit to move adapter from
1294 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1295 */
d68b603c 1296 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
d68b603c
AK
1297}
1298EXPORT_SYMBOL(iwl_apm_stop);
1299
fadb3582
BC
1300
1301/*
1302 * Start up NIC's basic functionality after it has been reset
1303 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1304 * NOTE: This does not load uCode nor start the embedded processor
1305 */
1306int iwl_apm_init(struct iwl_priv *priv)
1307{
1308 int ret = 0;
1309 u16 lctl;
1310
1311 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1312
1313 /*
1314 * Use "set_bit" below rather than "write", to preserve any hardware
1315 * bits already set by default after reset.
1316 */
1317
1318 /* Disable L0S exit timer (platform NMI Work/Around) */
1319 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1320 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1321
1322 /*
1323 * Disable L0s without affecting L1;
1324 * don't wait for ICH L0s (ICH bug W/A)
1325 */
1326 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1327 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1328
1329 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1330 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1331
1332 /*
1333 * Enable HAP INTA (interrupt from management bus) to
1334 * wake device's PCI Express link L1a -> L0s
1335 * NOTE: This is no-op for 3945 (non-existant bit)
1336 */
1337 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1338 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1339
1340 /*
a6c5c731
BC
1341 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1342 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1343 * If so (likely), disable L0S, so device moves directly L0->L1;
1344 * costs negligible amount of power savings.
1345 * If not (unlikely), enable L0S, so there is at least some
1346 * power savings, even without L1.
fadb3582
BC
1347 */
1348 if (priv->cfg->set_l0s) {
1349 lctl = iwl_pcie_link_ctl(priv);
1350 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1351 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1352 /* L1-ASPM enabled; disable(!) L0S */
1353 iwl_set_bit(priv, CSR_GIO_REG,
1354 CSR_GIO_REG_VAL_L0S_ENABLED);
1355 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1356 } else {
1357 /* L1-ASPM disabled; enable(!) L0S */
1358 iwl_clear_bit(priv, CSR_GIO_REG,
1359 CSR_GIO_REG_VAL_L0S_ENABLED);
1360 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1361 }
1362 }
1363
1364 /* Configure analog phase-lock-loop before activating to D0A */
1365 if (priv->cfg->pll_cfg_val)
1366 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1367
1368 /*
1369 * Set "initialization complete" bit to move adapter from
1370 * D0U* --> D0A* (powered-up active) state.
1371 */
1372 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1373
1374 /*
1375 * Wait for clock stabilization; once stabilized, access to
1376 * device-internal resources is supported, e.g. iwl_write_prph()
1377 * and accesses to uCode SRAM.
1378 */
1379 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1380 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1381 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1382 if (ret < 0) {
1383 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1384 goto out;
1385 }
1386
1387 /*
1388 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1389 * BSM (Boostrap State Machine) is only in 3945 and 4965;
1390 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1391 * and don't need BSM to restore data after power-saving sleep.
1392 *
1393 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1394 * do not disable clocks. This preserves any hardware bits already
1395 * set by default in "CLK_CTRL_REG" after reset.
1396 */
1397 if (priv->cfg->use_bsm)
1398 iwl_write_prph(priv, APMG_CLK_EN_REG,
1399 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1400 else
1401 iwl_write_prph(priv, APMG_CLK_EN_REG,
1402 APMG_CLK_VAL_DMA_CLK_RQT);
1403 udelay(20);
1404
1405 /* Disable L1-Active */
1406 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1407 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1408
1409out:
1410 return ret;
1411}
1412EXPORT_SYMBOL(iwl_apm_init);
1413
1414
1415
8ccde88a
SO
1416void iwl_configure_filter(struct ieee80211_hw *hw,
1417 unsigned int changed_flags,
1418 unsigned int *total_flags,
3ac64bee 1419 u64 multicast)
8ccde88a
SO
1420{
1421 struct iwl_priv *priv = hw->priv;
1422 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
1423
e1623446 1424 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
8ccde88a
SO
1425 changed_flags, *total_flags);
1426
1427 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1428 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
1429 *filter_flags |= RXON_FILTER_PROMISC_MSK;
1430 else
1431 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
1432 }
1433 if (changed_flags & FIF_ALLMULTI) {
1434 if (*total_flags & FIF_ALLMULTI)
1435 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
1436 else
1437 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
1438 }
1439 if (changed_flags & FIF_CONTROL) {
1440 if (*total_flags & FIF_CONTROL)
1441 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
1442 else
1443 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
1444 }
1445 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
1446 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
1447 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
1448 else
1449 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
1450 }
1451
1452 /* We avoid iwl_commit_rxon here to commit the new filter flags
1453 * since mac80211 will call ieee80211_hw_config immediately.
1454 * (mc_list is not supported at this time). Otherwise, we need to
1455 * queue a background iwl_commit_rxon work.
1456 */
1457
1458 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
1459 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1460}
1461EXPORT_SYMBOL(iwl_configure_filter);
1462
da154e30
RR
1463int iwl_set_hw_params(struct iwl_priv *priv)
1464{
da154e30
RR
1465 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1466 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1467 if (priv->cfg->mod_params->amsdu_size_8K)
2f301227 1468 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
da154e30 1469 else
2f301227 1470 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
da154e30 1471
2c2f3b33
TW
1472 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1473
49779293
RR
1474 if (priv->cfg->mod_params->disable_11n)
1475 priv->cfg->sku &= ~IWL_SKU_N;
1476
da154e30
RR
1477 /* Device-specific setup */
1478 return priv->cfg->ops->lib->set_hw_params(priv);
1479}
1480EXPORT_SYMBOL(iwl_set_hw_params);
6ba87956 1481
630fe9b6
TW
1482int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1483{
1484 int ret = 0;
5eadd94b
WYG
1485 s8 prev_tx_power = priv->tx_power_user_lmt;
1486
630fe9b6 1487 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
daf518de
WF
1488 IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
1489 tx_power,
1490 IWL_TX_POWER_TARGET_POWER_MIN);
630fe9b6
TW
1491 return -EINVAL;
1492 }
1493
dc1b0973 1494 if (tx_power > priv->tx_power_device_lmt) {
08f2d58d
WYG
1495 IWL_WARN(priv,
1496 "Requested user TXPOWER %d above upper limit %d.\n",
dc1b0973 1497 tx_power, priv->tx_power_device_lmt);
630fe9b6
TW
1498 return -EINVAL;
1499 }
1500
1501 if (priv->tx_power_user_lmt != tx_power)
1502 force = true;
1503
019fb97d 1504 /* if nic is not up don't send command */
5eadd94b
WYG
1505 if (iwl_is_ready_rf(priv)) {
1506 priv->tx_power_user_lmt = tx_power;
1507 if (force && priv->cfg->ops->lib->send_tx_power)
1508 ret = priv->cfg->ops->lib->send_tx_power(priv);
1509 else if (!priv->cfg->ops->lib->send_tx_power)
1510 ret = -EOPNOTSUPP;
1511 /*
1512 * if fail to set tx_power, restore the orig. tx power
1513 */
1514 if (ret)
1515 priv->tx_power_user_lmt = prev_tx_power;
1516 }
630fe9b6 1517
5eadd94b
WYG
1518 /*
1519 * Even this is an async host command, the command
1520 * will always report success from uCode
1521 * So once driver can placing the command into the queue
1522 * successfully, driver can use priv->tx_power_user_lmt
1523 * to reflect the current tx power
1524 */
630fe9b6
TW
1525 return ret;
1526}
1527EXPORT_SYMBOL(iwl_set_tx_power);
1528
ef850d7c 1529irqreturn_t iwl_isr_legacy(int irq, void *data)
f17d08a6
AK
1530{
1531 struct iwl_priv *priv = data;
1532 u32 inta, inta_mask;
1533 u32 inta_fh;
1534 if (!priv)
1535 return IRQ_NONE;
1536
1537 spin_lock(&priv->lock);
1538
1539 /* Disable (but don't clear!) interrupts here to avoid
1540 * back-to-back ISRs and sporadic interrupts from our NIC.
1541 * If we have something to service, the tasklet will re-enable ints.
1542 * If we *don't* have something, we'll re-enable before leaving here. */
1543 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1544 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1545
1546 /* Discover which interrupts are active/pending */
1547 inta = iwl_read32(priv, CSR_INT);
1548 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1549
1550 /* Ignore interrupt if there's nothing in NIC to service.
1551 * This may be due to IRQ shared with another device,
1552 * or due to sporadic interrupts thrown from our NIC. */
1553 if (!inta && !inta_fh) {
1554 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1555 goto none;
1556 }
1557
1558 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1559 /* Hardware disappeared. It might have already raised
1560 * an interrupt */
1561 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1562 goto unplugged;
1563 }
1564
1565 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1566 inta, inta_mask, inta_fh);
1567
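	/* mask out the scheduler (SCD) bit so that it alone does not cause
	 * the tasklet to be scheduled below */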
1568 inta &= ~CSR_INT_BIT_SCD;
1569
1570 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1571 if (likely(inta || inta_fh))
1572 tasklet_schedule(&priv->irq_tasklet);
1573
1574 unplugged:
1575 spin_unlock(&priv->lock);
1576 return IRQ_HANDLED;
1577
1578 none:
1579 /* re-enable interrupts here since we don't have anything to service. */
 1580	/* only re-enable if disabled by irq */
1581 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1582 iwl_enable_interrupts(priv);
1583 spin_unlock(&priv->lock);
1584 return IRQ_NONE;
1585}
ef850d7c 1586EXPORT_SYMBOL(iwl_isr_legacy);
f17d08a6 1587
17f841cd
SO
1588int iwl_send_bt_config(struct iwl_priv *priv)
1589{
1590 struct iwl_bt_cmd bt_cmd = {
456d0f76
WYG
1591 .lead_time = BT_LEAD_TIME_DEF,
1592 .max_kill = BT_MAX_KILL_DEF,
17f841cd
SO
1593 .kill_ack_mask = 0,
1594 .kill_cts_mask = 0,
1595 };
1596
06702a73
WYG
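	/* honour the bt_coex_active module parameter: when it is false,
	 * ask uCode to skip BT-coexistence handling */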
1597 if (!bt_coex_active)
1598 bt_cmd.flags = BT_COEX_DISABLE;
1599 else
1600 bt_cmd.flags = BT_COEX_ENABLE;
1601
1602 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1603 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1604
17f841cd
SO
1605 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1606 sizeof(struct iwl_bt_cmd), &bt_cmd);
1607}
1608EXPORT_SYMBOL(iwl_send_bt_config);
1609
ef8d5529 1610int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
49ea8596 1611{
ef8d5529
WYG
1612 struct iwl_statistics_cmd statistics_cmd = {
1613 .configuration_flags =
1614 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
49ea8596 1615 };
ef8d5529
WYG
1616
1617 if (flags & CMD_ASYNC)
1618 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1619 sizeof(struct iwl_statistics_cmd),
1620 &statistics_cmd, NULL);
1621 else
1622 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1623 sizeof(struct iwl_statistics_cmd),
1624 &statistics_cmd);
49ea8596
EG
1625}
1626EXPORT_SYMBOL(iwl_send_statistics_request);
7e8c519e 1627
b0692f2f
EG
1628/**
 1629 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
1630 * using sample data 100 bytes apart. If these sample points are good,
1631 * it's a pretty good bet that everything between them is good, too.
1632 */
1633static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1634{
1635 u32 val;
1636 int ret = 0;
1637 u32 errcnt = 0;
1638 u32 i;
1639
e1623446 1640 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
b0692f2f 1641
b0692f2f
EG
1642 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1643 /* read data comes through single port, auto-incr addr */
1644 /* NOTE: Use the debugless read so we don't flood kernel log
1645 * if IWL_DL_IO is set */
1646 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 1647 i + IWL49_RTC_INST_LOWER_BOUND);
b0692f2f
EG
1648 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1649 if (val != le32_to_cpu(*image)) {
1650 ret = -EIO;
1651 errcnt++;
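			/* a handful of mismatches is enough to conclude this
			 * image is not the one loaded in SRAM */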
1652 if (errcnt >= 3)
1653 break;
1654 }
1655 }
1656
b0692f2f
EG
1657 return ret;
1658}
1659
1660/**
 1661 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
1662 * looking at all data.
1663 */
1664static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1665 u32 len)
1666{
1667 u32 val;
1668 u32 save_len = len;
1669 int ret = 0;
1670 u32 errcnt;
1671
e1623446 1672 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
b0692f2f 1673
250bdd21
SO
1674 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1675 IWL49_RTC_INST_LOWER_BOUND);
b0692f2f
EG
1676
1677 errcnt = 0;
1678 for (; len > 0; len -= sizeof(u32), image++) {
1679 /* read data comes through single port, auto-incr addr */
1680 /* NOTE: Use the debugless read so we don't flood kernel log
1681 * if IWL_DL_IO is set */
1682 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1683 if (val != le32_to_cpu(*image)) {
15b1687c 1684 IWL_ERR(priv, "uCode INST section is invalid at "
b0692f2f
EG
1685 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1686 save_len - len, val, le32_to_cpu(*image));
1687 ret = -EIO;
1688 errcnt++;
1689 if (errcnt >= 20)
1690 break;
1691 }
1692 }
1693
b0692f2f 1694 if (!errcnt)
e1623446
TW
1695 IWL_DEBUG_INFO(priv,
1696 "ucode image in INSTRUCTION memory is good\n");
b0692f2f
EG
1697
1698 return ret;
1699}
1700
1701/**
1702 * iwl_verify_ucode - determine which instruction image is in SRAM,
1703 * and verify its contents
1704 */
1705int iwl_verify_ucode(struct iwl_priv *priv)
1706{
1707 __le32 *image;
1708 u32 len;
1709 int ret;
1710
1711 /* Try bootstrap */
1712 image = (__le32 *)priv->ucode_boot.v_addr;
1713 len = priv->ucode_boot.len;
1714 ret = iwlcore_verify_inst_sparse(priv, image, len);
1715 if (!ret) {
e1623446 1716 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
b0692f2f
EG
1717 return 0;
1718 }
1719
1720 /* Try initialize */
1721 image = (__le32 *)priv->ucode_init.v_addr;
1722 len = priv->ucode_init.len;
1723 ret = iwlcore_verify_inst_sparse(priv, image, len);
1724 if (!ret) {
e1623446 1725 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
b0692f2f
EG
1726 return 0;
1727 }
1728
1729 /* Try runtime/protocol */
1730 image = (__le32 *)priv->ucode_code.v_addr;
1731 len = priv->ucode_code.len;
1732 ret = iwlcore_verify_inst_sparse(priv, image, len);
1733 if (!ret) {
e1623446 1734 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
b0692f2f
EG
1735 return 0;
1736 }
1737
15b1687c 1738 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
b0692f2f
EG
1739
1740 /* Since nothing seems to match, show first several data entries in
1741 * instruction SRAM, so maybe visual inspection will give a clue.
1742 * Selection of bootstrap image (vs. other images) is arbitrary. */
1743 image = (__le32 *)priv->ucode_boot.v_addr;
1744 len = priv->ucode_boot.len;
1745 ret = iwl_verify_inst_full(priv, image, len);
1746
1747 return ret;
1748}
1749EXPORT_SYMBOL(iwl_verify_ucode);
1750
56e12615 1751
47f4a587
EG
1752void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1753{
1754 struct iwl_ct_kill_config cmd;
672639de 1755 struct iwl_ct_kill_throttling_config adv_cmd;
47f4a587
EG
1756 unsigned long flags;
1757 int ret = 0;
1758
1759 spin_lock_irqsave(&priv->lock, flags);
1760 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1761 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1762 spin_unlock_irqrestore(&priv->lock, flags);
3ad3b92a 1763 priv->thermal_throttle.ct_kill_toggle = false;
47f4a587 1764
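	/* devices that support CT-kill exit take both enter and exit
	 * thresholds; others take a single critical temperature */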
480e8407 1765 if (priv->cfg->support_ct_kill_exit) {
672639de
WYG
1766 adv_cmd.critical_temperature_enter =
1767 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1768 adv_cmd.critical_temperature_exit =
1769 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1770
1771 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1772 sizeof(adv_cmd), &adv_cmd);
d91b1ba3
WYG
1773 if (ret)
1774 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1775 else
1776 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1777 "succeeded, "
1778 "critical temperature enter is %d,"
1779 "exit is %d\n",
1780 priv->hw_params.ct_kill_threshold,
1781 priv->hw_params.ct_kill_exit_threshold);
480e8407 1782 } else {
672639de
WYG
1783 cmd.critical_temperature_R =
1784 cpu_to_le32(priv->hw_params.ct_kill_threshold);
189a2b59 1785
672639de
WYG
1786 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1787 sizeof(cmd), &cmd);
d91b1ba3
WYG
1788 if (ret)
1789 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1790 else
1791 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1792 "succeeded, "
1793 "critical temperature is %d\n",
1794 priv->hw_params.ct_kill_threshold);
672639de 1795 }
47f4a587
EG
1796}
1797EXPORT_SYMBOL(iwl_rf_kill_ct_config);
14a08a7f 1798
0ad91a35 1799
14a08a7f
EG
1800/*
1801 * CARD_STATE_CMD
1802 *
1803 * Use: Sets the device's internal card state to enable, disable, or halt
1804 *
1805 * When in the 'enable' state the card operates as normal.
1806 * When in the 'disable' state, the card enters into a low power mode.
1807 * When in the 'halt' state, the card is shut down and must be fully
1808 * restarted to come back on.
1809 */
c496294e 1810int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
14a08a7f
EG
1811{
1812 struct iwl_host_cmd cmd = {
1813 .id = REPLY_CARD_STATE_CMD,
1814 .len = sizeof(u32),
1815 .data = &flags,
c2acea8e 1816 .flags = meta_flag,
14a08a7f
EG
1817 };
1818
1819 return iwl_send_cmd(priv, &cmd);
1820}
1821
030f05ed
AK
1822void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1823 struct iwl_rx_mem_buffer *rxb)
1824{
1825#ifdef CONFIG_IWLWIFI_DEBUG
2f301227 1826 struct iwl_rx_packet *pkt = rxb_addr(rxb);
030f05ed
AK
1827 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1828 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1829 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1830#endif
1831}
1832EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1833
1834void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1835 struct iwl_rx_mem_buffer *rxb)
1836{
2f301227 1837 struct iwl_rx_packet *pkt = rxb_addr(rxb);
396887a2 1838 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
030f05ed 1839 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
396887a2
DH
1840 "notification for %s:\n", len,
1841 get_cmd_string(pkt->hdr.cmd));
1842 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
030f05ed
AK
1843}
1844EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
261b9c33
AK
1845
1846void iwl_rx_reply_error(struct iwl_priv *priv,
1847 struct iwl_rx_mem_buffer *rxb)
1848{
2f301227 1849 struct iwl_rx_packet *pkt = rxb_addr(rxb);
261b9c33
AK
1850
1851 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1852 "seq 0x%04X ser 0x%08X\n",
1853 le32_to_cpu(pkt->u.err_resp.error_type),
1854 get_cmd_string(pkt->u.err_resp.cmd_id),
1855 pkt->u.err_resp.cmd_id,
1856 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1857 le32_to_cpu(pkt->u.err_resp.error_info));
1858}
1859EXPORT_SYMBOL(iwl_rx_reply_error);
1860
a83b9141
WYG
1861void iwl_clear_isr_stats(struct iwl_priv *priv)
1862{
1863 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1864}
a83b9141 1865
488829f1
AK
1866int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1867 const struct ieee80211_tx_queue_params *params)
1868{
1869 struct iwl_priv *priv = hw->priv;
1870 unsigned long flags;
1871 int q;
1872
1873 IWL_DEBUG_MAC80211(priv, "enter\n");
1874
1875 if (!iwl_is_ready_rf(priv)) {
1876 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1877 return -EIO;
1878 }
1879
1880 if (queue >= AC_NUM) {
1881 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1882 return 0;
1883 }
1884
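	/* the driver's AC table is indexed in the reverse order of the
	 * mac80211 queue numbering, so invert the index */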
1885 q = AC_NUM - 1 - queue;
1886
1887 spin_lock_irqsave(&priv->lock, flags);
1888
1889 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
1890 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
1891 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
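	/* params->txop is given in units of 32 usec; scale it for the
	 * edca_txop field */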
1892 priv->qos_data.def_qos_parm.ac[q].edca_txop =
1893 cpu_to_le16((params->txop * 32));
1894
1895 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1896 priv->qos_data.qos_active = 1;
1897
1898 if (priv->iw_mode == NL80211_IFTYPE_AP)
1899 iwl_activate_qos(priv, 1);
1900 else if (priv->assoc_id && iwl_is_associated(priv))
1901 iwl_activate_qos(priv, 0);
1902
1903 spin_unlock_irqrestore(&priv->lock, flags);
1904
1905 IWL_DEBUG_MAC80211(priv, "leave\n");
1906 return 0;
1907}
1908EXPORT_SYMBOL(iwl_mac_conf_tx);
5bbe233b
AK
1909
1910static void iwl_ht_conf(struct iwl_priv *priv,
02bb1bea 1911 struct ieee80211_bss_conf *bss_conf)
5bbe233b 1912{
fad95bf5 1913 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
5bbe233b
AK
1914 struct ieee80211_sta *sta;
1915
1916 IWL_DEBUG_MAC80211(priv, "enter: \n");
1917
fad95bf5 1918 if (!ht_conf->is_ht)
5bbe233b
AK
1919 return;
1920
fad95bf5 1921 ht_conf->ht_protection =
9ed6bcce 1922 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
fad95bf5 1923 ht_conf->non_GF_STA_present =
9ed6bcce 1924 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5bbe233b 1925
02bb1bea
JB
1926 ht_conf->single_chain_sufficient = false;
1927
1928 switch (priv->iw_mode) {
1929 case NL80211_IFTYPE_STATION:
1930 rcu_read_lock();
5ed176e1 1931 sta = ieee80211_find_sta(priv->vif, priv->bssid);
02bb1bea
JB
1932 if (sta) {
1933 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1934 int maxstreams;
1935
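			/* one RX chain is enough if the peer advertises at
			 * most a single spatial stream */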
1936 maxstreams = (ht_cap->mcs.tx_params &
1937 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
1938 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1939 maxstreams += 1;
1940
1941 if ((ht_cap->mcs.rx_mask[1] == 0) &&
1942 (ht_cap->mcs.rx_mask[2] == 0))
1943 ht_conf->single_chain_sufficient = true;
1944 if (maxstreams <= 1)
1945 ht_conf->single_chain_sufficient = true;
1946 } else {
1947 /*
1948 * If at all, this can only happen through a race
1949 * when the AP disconnects us while we're still
1950 * setting up the connection, in that case mac80211
1951 * will soon tell us about that.
1952 */
1953 ht_conf->single_chain_sufficient = true;
1954 }
1955 rcu_read_unlock();
1956 break;
1957 case NL80211_IFTYPE_ADHOC:
1958 ht_conf->single_chain_sufficient = true;
1959 break;
1960 default:
1961 break;
1962 }
5bbe233b
AK
1963
1964 IWL_DEBUG_MAC80211(priv, "leave\n");
1965}
1966
c91c3efc
AK
1967static inline void iwl_set_no_assoc(struct iwl_priv *priv)
1968{
1969 priv->assoc_id = 0;
1970 iwl_led_disassociate(priv);
1971 /*
1972 * inform the ucode that there is no longer an
1973 * association and that no more packets should be
1974 * sent
1975 */
1976 priv->staging_rxon.filter_flags &=
1977 ~RXON_FILTER_ASSOC_MSK;
1978 priv->staging_rxon.assoc_id = 0;
1979 iwlcore_commit_rxon(priv);
1980}
1981
5bbe233b
AK
1982#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
1983void iwl_bss_info_changed(struct ieee80211_hw *hw,
2d0ddec5
JB
1984 struct ieee80211_vif *vif,
1985 struct ieee80211_bss_conf *bss_conf,
1986 u32 changes)
5bbe233b
AK
1987{
1988 struct iwl_priv *priv = hw->priv;
3a650292 1989 int ret;
5bbe233b
AK
1990
1991 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
1992
2d0ddec5
JB
1993 if (!iwl_is_alive(priv))
1994 return;
1995
1996 mutex_lock(&priv->mutex);
1997
1998 if (changes & BSS_CHANGED_BEACON &&
1999 priv->iw_mode == NL80211_IFTYPE_AP) {
2000 dev_kfree_skb(priv->ibss_beacon);
2001 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
2002 }
2003
d7129e19
JB
2004 if (changes & BSS_CHANGED_BEACON_INT) {
2005 priv->beacon_int = bss_conf->beacon_int;
2006 /* TODO: in AP mode, do something to make this take effect */
2007 }
2008
2009 if (changes & BSS_CHANGED_BSSID) {
2010 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2011
2012 /*
2013 * If there is currently a HW scan going on in the
2014 * background then we need to cancel it else the RXON
2015 * below/in post_associate will fail.
2016 */
2d0ddec5 2017 if (iwl_scan_cancel_timeout(priv, 100)) {
d7129e19 2018 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
2d0ddec5
JB
2019 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2020 mutex_unlock(&priv->mutex);
2021 return;
2022 }
2d0ddec5 2023
d7129e19
JB
2024 /* mac80211 only sets assoc when in STATION mode */
2025 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
2026 bss_conf->assoc) {
2027 memcpy(priv->staging_rxon.bssid_addr,
2028 bss_conf->bssid, ETH_ALEN);
2d0ddec5 2029
d7129e19
JB
2030 /* currently needed in a few places */
2031 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2032 } else {
2033 priv->staging_rxon.filter_flags &=
2034 ~RXON_FILTER_ASSOC_MSK;
2d0ddec5 2035 }
d7129e19 2036
2d0ddec5
JB
2037 }
2038
d7129e19
JB
2039 /*
2040 * This needs to be after setting the BSSID in case
2041 * mac80211 decides to do both changes at once because
2042 * it will invoke post_associate.
2043 */
2d0ddec5
JB
2044 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2045 changes & BSS_CHANGED_BEACON) {
2046 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2047
2048 if (beacon)
2049 iwl_mac_beacon_update(hw, beacon);
2050 }
2051
5bbe233b
AK
2052 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2053 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2054 bss_conf->use_short_preamble);
2055 if (bss_conf->use_short_preamble)
2056 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2057 else
2058 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2059 }
2060
2061 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2062 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
2063 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
2064 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
2065 else
2066 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2067 }
2068
d7129e19
JB
2069 if (changes & BSS_CHANGED_BASIC_RATES) {
2070 /* XXX use this information
2071 *
2072 * To do that, remove code from iwl_set_rate() and put something
2073 * like this here:
2074 *
2075 if (A-band)
2076 priv->staging_rxon.ofdm_basic_rates =
2077 bss_conf->basic_rates;
2078 else
2079 priv->staging_rxon.ofdm_basic_rates =
2080 bss_conf->basic_rates >> 4;
2081 priv->staging_rxon.cck_basic_rates =
2082 bss_conf->basic_rates & 0xF;
2083 */
2084 }
2085
5bbe233b
AK
2086 if (changes & BSS_CHANGED_HT) {
2087 iwl_ht_conf(priv, bss_conf);
45823531
AK
2088
2089 if (priv->cfg->ops->hcmd->set_rxon_chain)
2090 priv->cfg->ops->hcmd->set_rxon_chain(priv);
5bbe233b
AK
2091 }
2092
2093 if (changes & BSS_CHANGED_ASSOC) {
2094 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
5bbe233b
AK
2095 if (bss_conf->assoc) {
2096 priv->assoc_id = bss_conf->aid;
2097 priv->beacon_int = bss_conf->beacon_int;
5bbe233b
AK
2098 priv->timestamp = bss_conf->timestamp;
2099 priv->assoc_capability = bss_conf->assoc_capability;
2100
e932a609
JB
2101 iwl_led_associate(priv);
2102
d7129e19
JB
2103 /*
 2104			 * We have just associated; don't start a scan too early,
 2105			 * leave time for the EAPOL exchange to complete.
2106 *
2107 * XXX: do this in mac80211
5bbe233b
AK
2108 */
2109 priv->next_scan_jiffies = jiffies +
2110 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
d7129e19
JB
2111 if (!iwl_is_rfkill(priv))
2112 priv->cfg->ops->lib->post_associate(priv);
c91c3efc
AK
2113 } else
2114 iwl_set_no_assoc(priv);
d7129e19
JB
2115 }
2116
2117 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2118 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2119 changes);
2120 ret = iwl_send_rxon_assoc(priv);
2121 if (!ret) {
2122 /* Sync active_rxon with latest change. */
2123 memcpy((void *)&priv->active_rxon,
2124 &priv->staging_rxon,
2125 sizeof(struct iwl_rxon_cmd));
5bbe233b 2126 }
5bbe233b 2127 }
d7129e19 2128
c91c3efc
AK
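	/* when beaconing is enabled, latch the BSSID and configure AP
	 * operation; otherwise drop the association state */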
2129 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2130 if (vif->bss_conf.enable_beacon) {
2131 memcpy(priv->staging_rxon.bssid_addr,
2132 bss_conf->bssid, ETH_ALEN);
2133 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2134 iwlcore_config_ap(priv);
2135 } else
2136 iwl_set_no_assoc(priv);
f513dfff
DH
2137 }
2138
d7129e19
JB
2139 mutex_unlock(&priv->mutex);
2140
2d0ddec5 2141 IWL_DEBUG_MAC80211(priv, "leave\n");
5bbe233b
AK
2142}
2143EXPORT_SYMBOL(iwl_bss_info_changed);
2144
9944b938
AK
2145int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2146{
2147 struct iwl_priv *priv = hw->priv;
2148 unsigned long flags;
2149 __le64 timestamp;
2150
2151 IWL_DEBUG_MAC80211(priv, "enter\n");
2152
2153 if (!iwl_is_ready_rf(priv)) {
2154 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2155 return -EIO;
2156 }
2157
9944b938
AK
2158 spin_lock_irqsave(&priv->lock, flags);
2159
2160 if (priv->ibss_beacon)
2161 dev_kfree_skb(priv->ibss_beacon);
2162
2163 priv->ibss_beacon = skb;
2164
2165 priv->assoc_id = 0;
2166 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2167 priv->timestamp = le64_to_cpu(timestamp);
2168
2169 IWL_DEBUG_MAC80211(priv, "leave\n");
2170 spin_unlock_irqrestore(&priv->lock, flags);
2171
2172 iwl_reset_qos(priv);
2173
2174 priv->cfg->ops->lib->post_associate(priv);
2175
2176
2177 return 0;
2178}
2179EXPORT_SYMBOL(iwl_mac_beacon_update);
2180
b55e75ed 2181static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
727882d6 2182{
b55e75ed 2183 iwl_connection_init_rx_config(priv, vif->type);
727882d6
AK
2184
2185 if (priv->cfg->ops->hcmd->set_rxon_chain)
2186 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2187
2188 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2189
b55e75ed 2190 return iwlcore_commit_rxon(priv);
727882d6 2191}
727882d6 2192
b55e75ed 2193int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
cbb6ab94
AK
2194{
2195 struct iwl_priv *priv = hw->priv;
47e28f41 2196 int err = 0;
cbb6ab94 2197
1ed32e4f 2198 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
cbb6ab94 2199
47e28f41
JB
2200 mutex_lock(&priv->mutex);
2201
b55e75ed
JB
2202 if (WARN_ON(!iwl_is_ready_rf(priv))) {
2203 err = -EINVAL;
2204 goto out;
2205 }
2206
cbb6ab94
AK
2207 if (priv->vif) {
2208 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
47e28f41
JB
2209 err = -EOPNOTSUPP;
2210 goto out;
cbb6ab94
AK
2211 }
2212
1ed32e4f
JB
2213 priv->vif = vif;
2214 priv->iw_mode = vif->type;
cbb6ab94 2215
b55e75ed
JB
2216 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2217 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
cbb6ab94 2218
b55e75ed
JB
2219 err = iwl_set_mode(priv, vif);
2220 if (err)
2221 goto out_err;
7e246191
RC
2222
2223 /* Add the broadcast address so we can send broadcast frames */
2224 priv->cfg->ops->lib->add_bcast_station(priv);
2225
b55e75ed 2226 goto out;
cbb6ab94 2227
b55e75ed
JB
2228 out_err:
2229 priv->vif = NULL;
2230 priv->iw_mode = NL80211_IFTYPE_STATION;
47e28f41 2231 out:
cbb6ab94
AK
2232 mutex_unlock(&priv->mutex);
2233
2234 IWL_DEBUG_MAC80211(priv, "leave\n");
47e28f41 2235 return err;
cbb6ab94
AK
2236}
2237EXPORT_SYMBOL(iwl_mac_add_interface);
2238
d8052319 2239void iwl_mac_remove_interface(struct ieee80211_hw *hw,
b55e75ed 2240 struct ieee80211_vif *vif)
d8052319
AK
2241{
2242 struct iwl_priv *priv = hw->priv;
2243
2244 IWL_DEBUG_MAC80211(priv, "enter\n");
2245
2246 mutex_lock(&priv->mutex);
2247
7e246191
RC
2248 iwl_clear_ucode_stations(priv, true);
2249
d8052319
AK
2250 if (iwl_is_ready_rf(priv)) {
2251 iwl_scan_cancel_timeout(priv, 100);
2252 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2253 iwlcore_commit_rxon(priv);
2254 }
1ed32e4f 2255 if (priv->vif == vif) {
d8052319
AK
2256 priv->vif = NULL;
2257 memset(priv->bssid, 0, ETH_ALEN);
2258 }
2259 mutex_unlock(&priv->mutex);
2260
2261 IWL_DEBUG_MAC80211(priv, "leave\n");
2262
2263}
2264EXPORT_SYMBOL(iwl_mac_remove_interface);
2265
4808368d
AK
2266/**
2267 * iwl_mac_config - mac80211 config callback
2268 *
2269 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2270 * be set inappropriately and the driver currently sets the hardware up to
2271 * use it whenever needed.
2272 */
2273int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2274{
2275 struct iwl_priv *priv = hw->priv;
2276 const struct iwl_channel_info *ch_info;
2277 struct ieee80211_conf *conf = &hw->conf;
fad95bf5 2278 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
4808368d
AK
2279 unsigned long flags = 0;
2280 int ret = 0;
2281 u16 ch;
2282 int scan_active = 0;
2283
2284 mutex_lock(&priv->mutex);
2285
4808368d
AK
2286 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2287 conf->channel->hw_value, changed);
2288
2289 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2290 test_bit(STATUS_SCANNING, &priv->status))) {
2291 scan_active = 1;
2292 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2293 }
2294
ba37a3d0
JB
2295 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2296 IEEE80211_CONF_CHANGE_CHANNEL)) {
2297 /* mac80211 uses static for non-HT which is what we want */
2298 priv->current_ht_config.smps = conf->smps_mode;
2299
2300 /*
2301 * Recalculate chain counts.
2302 *
2303 * If monitor mode is enabled then mac80211 will
2304 * set up the SM PS mode to OFF if an HT channel is
2305 * configured.
2306 */
2307 if (priv->cfg->ops->hcmd->set_rxon_chain)
2308 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2309 }
4808368d
AK
2310
2311 /* during scanning mac80211 will delay channel setting until
 2312	 * the scan finishes, at which point we are called with changed = 0
2313 */
2314 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2315 if (scan_active)
2316 goto set_ch_out;
2317
2318 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2319 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2320 if (!is_channel_valid(ch_info)) {
2321 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2322 ret = -EINVAL;
2323 goto set_ch_out;
2324 }
2325
4808368d
AK
2326 spin_lock_irqsave(&priv->lock, flags);
2327
28bd723b
DH
2328 /* Configure HT40 channels */
2329 ht_conf->is_ht = conf_is_ht(conf);
2330 if (ht_conf->is_ht) {
2331 if (conf_is_ht40_minus(conf)) {
2332 ht_conf->extension_chan_offset =
2333 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
c812ee24 2334 ht_conf->is_40mhz = true;
28bd723b
DH
2335 } else if (conf_is_ht40_plus(conf)) {
2336 ht_conf->extension_chan_offset =
2337 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
c812ee24 2338 ht_conf->is_40mhz = true;
28bd723b
DH
2339 } else {
2340 ht_conf->extension_chan_offset =
2341 IEEE80211_HT_PARAM_CHA_SEC_NONE;
c812ee24 2342 ht_conf->is_40mhz = false;
28bd723b
DH
2343 }
2344 } else
c812ee24 2345 ht_conf->is_40mhz = false;
28bd723b
DH
2346 /* Default to no protection. Protection mode will later be set
2347 * from BSS config in iwl_ht_conf */
2348 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
4808368d
AK
2349
2350 /* if we are switching from ht to 2.4 clear flags
2351 * from any ht related info since 2.4 does not
2352 * support ht */
2353 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2354 priv->staging_rxon.flags = 0;
2355
2356 iwl_set_rxon_channel(priv, conf->channel);
5e2f75b8 2357 iwl_set_rxon_ht(priv, ht_conf);
4808368d
AK
2358
2359 iwl_set_flags_for_band(priv, conf->channel->band);
2360 spin_unlock_irqrestore(&priv->lock, flags);
0924e519
WYG
2361 if (iwl_is_associated(priv) &&
2362 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2363 priv->cfg->ops->lib->set_channel_switch) {
2364 iwl_set_rate(priv);
2365 /*
2366 * at this point, staging_rxon has the
2367 * configuration for channel switch
2368 */
2369 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2370 ch);
2371 if (!ret) {
2372 iwl_print_rx_config_cmd(priv);
2373 goto out;
2374 }
2375 priv->switch_rxon.switch_in_progress = false;
2376 }
4808368d
AK
2377 set_ch_out:
2378 /* The list of supported rates and rate mask can be different
2379 * for each band; since the band may have changed, reset
2380 * the rate mask to what mac80211 lists */
2381 iwl_set_rate(priv);
2382 }
2383
78f5fb7f
JB
2384 if (changed & (IEEE80211_CONF_CHANGE_PS |
2385 IEEE80211_CONF_CHANGE_IDLE)) {
e312c24c 2386 ret = iwl_power_update_mode(priv, false);
4808368d 2387 if (ret)
e312c24c 2388 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
4808368d
AK
2389 }
2390
2391 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2392 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2393 priv->tx_power_user_lmt, conf->power_level);
2394
2395 iwl_set_tx_power(priv, conf->power_level, false);
2396 }
2397
0cf4c01e
MA
2398 if (!iwl_is_ready(priv)) {
2399 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2400 goto out;
2401 }
2402
4808368d
AK
2403 if (scan_active)
2404 goto out;
2405
2406 if (memcmp(&priv->active_rxon,
2407 &priv->staging_rxon, sizeof(priv->staging_rxon)))
2408 iwlcore_commit_rxon(priv);
2409 else
2410 IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
2411
2412
2413out:
2414 IWL_DEBUG_MAC80211(priv, "leave\n");
2415 mutex_unlock(&priv->mutex);
2416 return ret;
2417}
2418EXPORT_SYMBOL(iwl_mac_config);
2419
bd564261
AK
2420void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2421{
2422 struct iwl_priv *priv = hw->priv;
2423 unsigned long flags;
2424
2425 mutex_lock(&priv->mutex);
2426 IWL_DEBUG_MAC80211(priv, "enter\n");
2427
2428 spin_lock_irqsave(&priv->lock, flags);
fad95bf5 2429 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
bd564261
AK
2430 spin_unlock_irqrestore(&priv->lock, flags);
2431
2432 iwl_reset_qos(priv);
2433
2434 spin_lock_irqsave(&priv->lock, flags);
2435 priv->assoc_id = 0;
2436 priv->assoc_capability = 0;
bd564261
AK
2437
2438 /* new association get rid of ibss beacon skb */
2439 if (priv->ibss_beacon)
2440 dev_kfree_skb(priv->ibss_beacon);
2441
2442 priv->ibss_beacon = NULL;
2443
57c4d7b4 2444 priv->beacon_int = priv->vif->bss_conf.beacon_int;
bd564261 2445 priv->timestamp = 0;
bd564261
AK
2446
2447 spin_unlock_irqrestore(&priv->lock, flags);
2448
2449 if (!iwl_is_ready_rf(priv)) {
2450 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2451 mutex_unlock(&priv->mutex);
2452 return;
2453 }
2454
 2455	/* we are restarting the association process;
 2456	 * clear the RXON_FILTER_ASSOC_MSK bit
 2457	 */
b4665df4
JB
2458 iwl_scan_cancel_timeout(priv, 100);
2459 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2460 iwlcore_commit_rxon(priv);
bd564261
AK
2461
2462 iwl_set_rate(priv);
2463
2464 mutex_unlock(&priv->mutex);
2465
2466 IWL_DEBUG_MAC80211(priv, "leave\n");
2467}
2468EXPORT_SYMBOL(iwl_mac_reset_tsf);
2469
88804e2b
WYG
2470int iwl_alloc_txq_mem(struct iwl_priv *priv)
2471{
2472 if (!priv->txq)
2473 priv->txq = kzalloc(
2474 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2475 GFP_KERNEL);
2476 if (!priv->txq) {
2477 IWL_ERR(priv, "Not enough memory for txq \n");
2478 return -ENOMEM;
2479 }
2480 return 0;
2481}
2482EXPORT_SYMBOL(iwl_alloc_txq_mem);
2483
2484void iwl_free_txq_mem(struct iwl_priv *priv)
2485{
2486 kfree(priv->txq);
2487 priv->txq = NULL;
2488}
2489EXPORT_SYMBOL(iwl_free_txq_mem);
2490
1933ac4d
WYG
2491int iwl_send_wimax_coex(struct iwl_priv *priv)
2492{
 2493	struct iwl_wimax_coex_cmd coex_cmd = {}; /* zeroed; flag bits below are OR'd in */
2494
2495 if (priv->cfg->support_wimax_coexist) {
2496 /* UnMask wake up src at associated sleep */
2497 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2498
2499 /* UnMask wake up src at unassociated sleep */
2500 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2501 memcpy(coex_cmd.sta_prio, cu_priorities,
2502 sizeof(struct iwl_wimax_coex_event_entry) *
2503 COEX_NUM_OF_EVENTS);
2504
2505 /* enabling the coexistence feature */
2506 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2507
2508 /* enabling the priorities tables */
2509 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2510 } else {
2511 /* coexistence is disabled */
2512 memset(&coex_cmd, 0, sizeof(coex_cmd));
2513 }
2514 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2515 sizeof(coex_cmd), &coex_cmd);
2516}
2517EXPORT_SYMBOL(iwl_send_wimax_coex);
2518
20594eb0
WYG
2519#ifdef CONFIG_IWLWIFI_DEBUGFS
2520
2521#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
2522
2523void iwl_reset_traffic_log(struct iwl_priv *priv)
2524{
2525 priv->tx_traffic_idx = 0;
2526 priv->rx_traffic_idx = 0;
2527 if (priv->tx_traffic)
2528 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2529 if (priv->rx_traffic)
2530 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2531}
2532
2533int iwl_alloc_traffic_mem(struct iwl_priv *priv)
2534{
2535 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
2536
2537 if (iwl_debug_level & IWL_DL_TX) {
2538 if (!priv->tx_traffic) {
2539 priv->tx_traffic =
2540 kzalloc(traffic_size, GFP_KERNEL);
2541 if (!priv->tx_traffic)
2542 return -ENOMEM;
2543 }
2544 }
2545 if (iwl_debug_level & IWL_DL_RX) {
2546 if (!priv->rx_traffic) {
2547 priv->rx_traffic =
2548 kzalloc(traffic_size, GFP_KERNEL);
2549 if (!priv->rx_traffic)
2550 return -ENOMEM;
2551 }
2552 }
2553 iwl_reset_traffic_log(priv);
2554 return 0;
2555}
2556EXPORT_SYMBOL(iwl_alloc_traffic_mem);
2557
2558void iwl_free_traffic_mem(struct iwl_priv *priv)
2559{
2560 kfree(priv->tx_traffic);
2561 priv->tx_traffic = NULL;
2562
2563 kfree(priv->rx_traffic);
2564 priv->rx_traffic = NULL;
2565}
2566EXPORT_SYMBOL(iwl_free_traffic_mem);
2567
2568void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
2569 u16 length, struct ieee80211_hdr *header)
2570{
2571 __le16 fc;
2572 u16 len;
2573
2574 if (likely(!(iwl_debug_level & IWL_DL_TX)))
2575 return;
2576
2577 if (!priv->tx_traffic)
2578 return;
2579
2580 fc = header->frame_control;
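	/* only data frames are logged; each entry is capped at
	 * IWL_TRAFFIC_ENTRY_SIZE and stored in a circular buffer */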
2581 if (ieee80211_is_data(fc)) {
2582 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2583 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2584 memcpy((priv->tx_traffic +
2585 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2586 header, len);
2587 priv->tx_traffic_idx =
2588 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2589 }
2590}
2591EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
2592
2593void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
2594 u16 length, struct ieee80211_hdr *header)
2595{
2596 __le16 fc;
2597 u16 len;
2598
2599 if (likely(!(iwl_debug_level & IWL_DL_RX)))
2600 return;
2601
2602 if (!priv->rx_traffic)
2603 return;
2604
2605 fc = header->frame_control;
2606 if (ieee80211_is_data(fc)) {
2607 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2608 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2609 memcpy((priv->rx_traffic +
2610 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2611 header, len);
2612 priv->rx_traffic_idx =
2613 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2614 }
2615}
2616EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
22fdf3c9
WYG
2617
2618const char *get_mgmt_string(int cmd)
2619{
2620 switch (cmd) {
2621 IWL_CMD(MANAGEMENT_ASSOC_REQ);
2622 IWL_CMD(MANAGEMENT_ASSOC_RESP);
2623 IWL_CMD(MANAGEMENT_REASSOC_REQ);
2624 IWL_CMD(MANAGEMENT_REASSOC_RESP);
2625 IWL_CMD(MANAGEMENT_PROBE_REQ);
2626 IWL_CMD(MANAGEMENT_PROBE_RESP);
2627 IWL_CMD(MANAGEMENT_BEACON);
2628 IWL_CMD(MANAGEMENT_ATIM);
2629 IWL_CMD(MANAGEMENT_DISASSOC);
2630 IWL_CMD(MANAGEMENT_AUTH);
2631 IWL_CMD(MANAGEMENT_DEAUTH);
2632 IWL_CMD(MANAGEMENT_ACTION);
2633 default:
2634 return "UNKNOWN";
2635
2636 }
2637}
2638
2639const char *get_ctrl_string(int cmd)
2640{
2641 switch (cmd) {
2642 IWL_CMD(CONTROL_BACK_REQ);
2643 IWL_CMD(CONTROL_BACK);
2644 IWL_CMD(CONTROL_PSPOLL);
2645 IWL_CMD(CONTROL_RTS);
2646 IWL_CMD(CONTROL_CTS);
2647 IWL_CMD(CONTROL_ACK);
2648 IWL_CMD(CONTROL_CFEND);
2649 IWL_CMD(CONTROL_CFENDACK);
2650 default:
2651 return "UNKNOWN";
2652
2653 }
2654}
2655
7163b8a4 2656void iwl_clear_traffic_stats(struct iwl_priv *priv)
22fdf3c9
WYG
2657{
2658 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
22fdf3c9 2659 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
7163b8a4 2660 priv->led_tpt = 0;
22fdf3c9
WYG
2661}
2662
2663/*
 2664 * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() will record
 2665 * all MGMT, CTRL and DATA packets for both the TX and RX paths; use
 2666 * debugfs to display the tx/rx statistics.
 2667 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL information
 2668 * will be recorded, but DATA packets are still counted because iwl_led.c
 2669 * needs to control the LED blinking based on the number of tx and rx
 2670 * data frames.
 2671 *
 2672 */
2673void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
2674{
2675 struct traffic_stats *stats;
2676
2677 if (is_tx)
2678 stats = &priv->tx_stats;
2679 else
2680 stats = &priv->rx_stats;
2681
2682 if (ieee80211_is_mgmt(fc)) {
2683 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2684 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2685 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
2686 break;
2687 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2688 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
2689 break;
2690 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2691 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
2692 break;
2693 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2694 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
2695 break;
2696 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2697 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
2698 break;
2699 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2700 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
2701 break;
2702 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2703 stats->mgmt[MANAGEMENT_BEACON]++;
2704 break;
2705 case cpu_to_le16(IEEE80211_STYPE_ATIM):
2706 stats->mgmt[MANAGEMENT_ATIM]++;
2707 break;
2708 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2709 stats->mgmt[MANAGEMENT_DISASSOC]++;
2710 break;
2711 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2712 stats->mgmt[MANAGEMENT_AUTH]++;
2713 break;
2714 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2715 stats->mgmt[MANAGEMENT_DEAUTH]++;
2716 break;
2717 case cpu_to_le16(IEEE80211_STYPE_ACTION):
2718 stats->mgmt[MANAGEMENT_ACTION]++;
2719 break;
2720 }
2721 } else if (ieee80211_is_ctl(fc)) {
2722 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2723 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
2724 stats->ctrl[CONTROL_BACK_REQ]++;
2725 break;
2726 case cpu_to_le16(IEEE80211_STYPE_BACK):
2727 stats->ctrl[CONTROL_BACK]++;
2728 break;
2729 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
2730 stats->ctrl[CONTROL_PSPOLL]++;
2731 break;
2732 case cpu_to_le16(IEEE80211_STYPE_RTS):
2733 stats->ctrl[CONTROL_RTS]++;
2734 break;
2735 case cpu_to_le16(IEEE80211_STYPE_CTS):
2736 stats->ctrl[CONTROL_CTS]++;
2737 break;
2738 case cpu_to_le16(IEEE80211_STYPE_ACK):
2739 stats->ctrl[CONTROL_ACK]++;
2740 break;
2741 case cpu_to_le16(IEEE80211_STYPE_CFEND):
2742 stats->ctrl[CONTROL_CFEND]++;
2743 break;
2744 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
2745 stats->ctrl[CONTROL_CFENDACK]++;
2746 break;
2747 }
2748 } else {
2749 /* data */
2750 stats->data_cnt++;
2751 stats->data_bytes += len;
2752 }
d5f4cf71 2753 iwl_leds_background(priv);
22fdf3c9
WYG
2754}
2755EXPORT_SYMBOL(iwl_update_stats);
20594eb0
WYG
2756#endif
2757
696bdee3
WYG
2758static const char *get_csr_string(int cmd)
2759{
2760 switch (cmd) {
2761 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2762 IWL_CMD(CSR_INT_COALESCING);
2763 IWL_CMD(CSR_INT);
2764 IWL_CMD(CSR_INT_MASK);
2765 IWL_CMD(CSR_FH_INT_STATUS);
2766 IWL_CMD(CSR_GPIO_IN);
2767 IWL_CMD(CSR_RESET);
2768 IWL_CMD(CSR_GP_CNTRL);
2769 IWL_CMD(CSR_HW_REV);
2770 IWL_CMD(CSR_EEPROM_REG);
2771 IWL_CMD(CSR_EEPROM_GP);
2772 IWL_CMD(CSR_OTP_GP_REG);
2773 IWL_CMD(CSR_GIO_REG);
2774 IWL_CMD(CSR_GP_UCODE_REG);
2775 IWL_CMD(CSR_GP_DRIVER_REG);
2776 IWL_CMD(CSR_UCODE_DRV_GP1);
2777 IWL_CMD(CSR_UCODE_DRV_GP2);
2778 IWL_CMD(CSR_LED_REG);
2779 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2780 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2781 IWL_CMD(CSR_ANA_PLL_CFG);
2782 IWL_CMD(CSR_HW_REV_WA_REG);
2783 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2784 default:
2785 return "UNKNOWN";
2786
2787 }
2788}
2789
2790void iwl_dump_csr(struct iwl_priv *priv)
2791{
2792 int i;
2793 u32 csr_tbl[] = {
2794 CSR_HW_IF_CONFIG_REG,
2795 CSR_INT_COALESCING,
2796 CSR_INT,
2797 CSR_INT_MASK,
2798 CSR_FH_INT_STATUS,
2799 CSR_GPIO_IN,
2800 CSR_RESET,
2801 CSR_GP_CNTRL,
2802 CSR_HW_REV,
2803 CSR_EEPROM_REG,
2804 CSR_EEPROM_GP,
2805 CSR_OTP_GP_REG,
2806 CSR_GIO_REG,
2807 CSR_GP_UCODE_REG,
2808 CSR_GP_DRIVER_REG,
2809 CSR_UCODE_DRV_GP1,
2810 CSR_UCODE_DRV_GP2,
2811 CSR_LED_REG,
2812 CSR_DRAM_INT_TBL_REG,
2813 CSR_GIO_CHICKEN_BITS,
2814 CSR_ANA_PLL_CFG,
2815 CSR_HW_REV_WA_REG,
2816 CSR_DBG_HPET_MEM_REG
2817 };
2818 IWL_ERR(priv, "CSR values:\n");
2819 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2820 "CSR_INT_PERIODIC_REG)\n");
2821 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2822 IWL_ERR(priv, " %25s: 0X%08x\n",
2823 get_csr_string(csr_tbl[i]),
2824 iwl_read32(priv, csr_tbl[i]));
2825 }
2826}
2827EXPORT_SYMBOL(iwl_dump_csr);
2828
1b3eb823
WYG
2829static const char *get_fh_string(int cmd)
2830{
2831 switch (cmd) {
2832 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2833 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2834 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2835 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2836 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2837 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2838 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2839 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2840 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2841 default:
2842 return "UNKNOWN";
2843
2844 }
2845}
2846
2847int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2848{
2849 int i;
2850#ifdef CONFIG_IWLWIFI_DEBUG
2851 int pos = 0;
2852 size_t bufsz = 0;
2853#endif
2854 u32 fh_tbl[] = {
2855 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2856 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2857 FH_RSCSR_CHNL0_WPTR,
2858 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2859 FH_MEM_RSSR_SHARED_CTRL_REG,
2860 FH_MEM_RSSR_RX_STATUS_REG,
2861 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2862 FH_TSSR_TX_STATUS_REG,
2863 FH_TSSR_TX_ERROR_REG
2864 };
2865#ifdef CONFIG_IWLWIFI_DEBUG
2866 if (display) {
2867 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2868 *buf = kmalloc(bufsz, GFP_KERNEL);
2869 if (!*buf)
2870 return -ENOMEM;
2871 pos += scnprintf(*buf + pos, bufsz - pos,
2872 "FH register values:\n");
2873 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2874 pos += scnprintf(*buf + pos, bufsz - pos,
2875 " %34s: 0X%08x\n",
2876 get_fh_string(fh_tbl[i]),
2877 iwl_read_direct32(priv, fh_tbl[i]));
2878 }
2879 return pos;
2880 }
2881#endif
2882 IWL_ERR(priv, "FH register values:\n");
2883 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2884 IWL_ERR(priv, " %34s: 0X%08x\n",
2885 get_fh_string(fh_tbl[i]),
2886 iwl_read_direct32(priv, fh_tbl[i]));
2887 }
2888 return 0;
2889}
2890EXPORT_SYMBOL(iwl_dump_fh);
2891
a93e7973 2892static void iwl_force_rf_reset(struct iwl_priv *priv)
afbdd69a
WYG
2893{
2894 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2895 return;
2896
2897 if (!iwl_is_associated(priv)) {
2898 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
2899 return;
2900 }
 2901	/*
 2902	 * There is no easy and better way to force-reset the radio;
 2903	 * the only known method is switching the channel, which forces
 2904	 * the radio to reset and retune.
 2905	 * Use an internal short scan (single channel) operation to
 2906	 * achieve this.
 2907	 * The driver should reset the radio when a number of consecutive
 2908	 * missed beacons, or any other uCode error condition, is detected.
 2909	 */
2910 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
2911 iwl_internal_short_hw_scan(priv);
2912 return;
2913}
a93e7973 2914
a93e7973
WYG
2915
2916int iwl_force_reset(struct iwl_priv *priv, int mode)
2917{
8a472da4
WYG
2918 struct iwl_force_reset *force_reset;
2919
a93e7973
WYG
2920 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2921 return -EINVAL;
2922
8a472da4
WYG
2923 if (mode >= IWL_MAX_FORCE_RESET) {
2924 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
2925 return -EINVAL;
2926 }
2927 force_reset = &priv->force_reset[mode];
2928 force_reset->reset_request_count++;
2929 if (force_reset->last_force_reset_jiffies &&
2930 time_after(force_reset->last_force_reset_jiffies +
2931 force_reset->reset_duration, jiffies)) {
a93e7973 2932 IWL_DEBUG_INFO(priv, "force reset rejected\n");
8a472da4 2933 force_reset->reset_reject_count++;
a93e7973
WYG
2934 return -EAGAIN;
2935 }
8a472da4
WYG
2936 force_reset->reset_success_count++;
2937 force_reset->last_force_reset_jiffies = jiffies;
a93e7973 2938 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
a93e7973
WYG
2939 switch (mode) {
2940 case IWL_RF_RESET:
2941 iwl_force_rf_reset(priv);
2942 break;
2943 case IWL_FW_RESET:
2944 IWL_ERR(priv, "On demand firmware reload\n");
2945 /* Set the FW error flag -- cleared on iwl_down */
2946 set_bit(STATUS_FW_ERROR, &priv->status);
2947 wake_up_interruptible(&priv->wait_command_queue);
2948 /*
2949 * Keep the restart process from trying to send host
2950 * commands by clearing the INIT status bit
2951 */
2952 clear_bit(STATUS_READY, &priv->status);
2953 queue_work(priv->workqueue, &priv->restart);
2954 break;
a93e7973 2955 }
a93e7973
WYG
2956 return 0;
2957}
b74e31a9
WYG
2958EXPORT_SYMBOL(iwl_force_reset);
2959
2960/**
2961 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
2962 *
 2963 * During normal conditions (no queue is stuck), the timer is continually
 2964 * set to execute every monitor_recover_period milliseconds after the last
 2965 * timer expired. When the queue read_ptr is at the same place, the timer
 2966 * is shortened to 100 ms. This is
 2967 * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
 2968 * 2) to detect stuck queues quicker, before the station and AP can
 2969 * disassociate from each other.
 2970 *
 2971 * This function monitors all the tx queues and recovers if any of the
 2972 * queues is stuck.
 2973 * 1. It first checks the cmd queue for stuck conditions. If it is stuck,
 2974 * it will recover by resetting the firmware and return.
 2975 * 2. Then, it checks for station association. If associated, it will check
 2976 * the other queues. If any queue is stuck, it will recover by resetting
 2977 * the firmware.
 2978 * Note: the queue read_ptr must be found at the same place MAX_REPEAT+1
 2979 * times before the queue is considered stuck.
2980 */
2981/*
 2982 * The maximum number of times the read pointer of a tx queue may stay
 2983 * at the same place before the queue is considered stuck.
2984 */
2985#define MAX_REPEAT (2)
2986static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2987{
2988 struct iwl_tx_queue *txq;
2989 struct iwl_queue *q;
2990
2991 txq = &priv->txq[cnt];
2992 q = &txq->q;
 2993	/* an empty queue cannot be stuck; only check queues with work pending */
2994 if (q->read_ptr != q->write_ptr) {
2995 if (q->read_ptr == q->last_read_ptr) {
2996 /* a queue has not been read from last time */
2997 if (q->repeat_same_read_ptr > MAX_REPEAT) {
2998 IWL_ERR(priv,
2999 "queue %d stuck %d time. Fw reload.\n",
3000 q->id, q->repeat_same_read_ptr);
3001 q->repeat_same_read_ptr = 0;
3002 iwl_force_reset(priv, IWL_FW_RESET);
3003 } else {
3004 q->repeat_same_read_ptr++;
3005 IWL_DEBUG_RADIO(priv,
3006 "queue %d, not read %d time\n",
3007 q->id,
3008 q->repeat_same_read_ptr);
3009 mod_timer(&priv->monitor_recover, jiffies +
3010 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
3011 }
3012 return 1;
3013 } else {
3014 q->last_read_ptr = q->read_ptr;
3015 q->repeat_same_read_ptr = 0;
3016 }
3017 }
3018 return 0;
3019}
3020
3021void iwl_bg_monitor_recover(unsigned long data)
3022{
3023 struct iwl_priv *priv = (struct iwl_priv *)data;
3024 int cnt;
3025
3026 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3027 return;
3028
3029 /* monitor and check for stuck cmd queue */
3030 if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
3031 return;
3032
3033 /* monitor and check for other stuck queues */
3034 if (iwl_is_associated(priv)) {
3035 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
3036 /* skip as we already checked the command queue */
3037 if (cnt == IWL_CMD_QUEUE_NUM)
3038 continue;
3039 if (iwl_check_stuck_queue(priv, cnt))
3040 return;
3041 }
3042 }
3043 /*
3044 * Reschedule the timer to occur in
3045 * priv->cfg->monitor_recover_period
3046 */
3047 mod_timer(&priv->monitor_recover,
3048 jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
3049}
3050EXPORT_SYMBOL(iwl_bg_monitor_recover);
afbdd69a 3051
6da3a13e
WYG
3052#ifdef CONFIG_PM
3053
3054int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3055{
3056 struct iwl_priv *priv = pci_get_drvdata(pdev);
3057
3058 /*
 3059	 * This function is called when the system goes into suspend state.
 3060	 * mac80211 will call iwl_mac_stop() from its suspend function first,
 3061	 * but since iwl_mac_stop() has no knowledge of who the caller is,
 3062	 * it will not call apm_ops.stop() to stop the DMA operation.
 3063	 * Call apm_ops.stop() here to make sure the DMA is stopped.
3064 */
3065 priv->cfg->ops->lib->apm_ops.stop(priv);
3066
3067 pci_save_state(pdev);
3068 pci_disable_device(pdev);
3069 pci_set_power_state(pdev, PCI_D3hot);
3070
3071 return 0;
3072}
3073EXPORT_SYMBOL(iwl_pci_suspend);
3074
3075int iwl_pci_resume(struct pci_dev *pdev)
3076{
3077 struct iwl_priv *priv = pci_get_drvdata(pdev);
3078 int ret;
3079
3080 pci_set_power_state(pdev, PCI_D0);
3081 ret = pci_enable_device(pdev);
3082 if (ret)
3083 return ret;
3084 pci_restore_state(pdev);
3085 iwl_enable_interrupts(priv);
3086
3087 return 0;
3088}
3089EXPORT_SYMBOL(iwl_pci_resume);
3090
3091#endif /* CONFIG_PM */