drivers/net/wireless/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65 #include <linux/kernel.h>
66 #include <linux/slab.h>
67 #include <linux/skbuff.h>
68 #include <linux/netdevice.h>
69 #include <linux/etherdevice.h>
70 #include <linux/ip.h>
71 #include <linux/if_arp.h>
72 #include <linux/devcoredump.h>
73 #include <net/mac80211.h>
74 #include <net/ieee80211_radiotap.h>
75 #include <net/tcp.h>
76
77 #include "iwl-op-mode.h"
78 #include "iwl-io.h"
79 #include "mvm.h"
80 #include "sta.h"
81 #include "time-event.h"
82 #include "iwl-eeprom-parse.h"
83 #include "iwl-phy-db.h"
84 #include "testmode.h"
85 #include "iwl-fw-error-dump.h"
86 #include "iwl-prph.h"
87 #include "iwl-csr.h"
88 #include "iwl-nvm-parse.h"
89
90 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 {
92 .max = 1,
93 .types = BIT(NL80211_IFTYPE_STATION),
94 },
95 {
96 .max = 1,
97 .types = BIT(NL80211_IFTYPE_AP) |
98 BIT(NL80211_IFTYPE_P2P_CLIENT) |
99 BIT(NL80211_IFTYPE_P2P_GO),
100 },
101 {
102 .max = 1,
103 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 },
105 };
106
107 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 {
109 .num_different_channels = 2,
110 .max_interfaces = 3,
111 .limits = iwl_mvm_limits,
112 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 },
114 };
115
116 #ifdef CONFIG_PM_SLEEP
117 static const struct nl80211_wowlan_tcp_data_token_feature
118 iwl_mvm_wowlan_tcp_token_feature = {
119 .min_len = 0,
120 .max_len = 255,
121 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122 };
123
124 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 .tok = &iwl_mvm_wowlan_tcp_token_feature,
126 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 sizeof(struct ethhdr) -
128 sizeof(struct iphdr) -
129 sizeof(struct tcphdr),
130 .data_interval_max = 65535, /* __le16 in API */
131 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 sizeof(struct ethhdr) -
133 sizeof(struct iphdr) -
134 sizeof(struct tcphdr),
135 .seq = true,
136 };
137 #endif
138
139 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
140 /*
141 * Use the reserved field to indicate magic values.
142 * These values are only used internally by the driver
143 * and never reach the fw (reserved will be 0).
144 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145 * be the vif's IP address. If the vif doesn't have exactly one
146 * IP address (none, or more than one), this attribute is
147 * skipped.
148 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149 * the LSB bytes of the vif's MAC address.
150 */
151 enum {
152 BC_FILTER_MAGIC_NONE = 0,
153 BC_FILTER_MAGIC_IP,
154 BC_FILTER_MAGIC_MAC,
155 };
156
157 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 {
159 /* arp */
160 .discard = 0,
161 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 .attrs = {
163 {
164 /* frame type - arp, hw type - ethernet */
165 .offset_type =
166 BCAST_FILTER_OFFSET_PAYLOAD_START,
167 .offset = sizeof(rfc1042_header),
168 .val = cpu_to_be32(0x08060001),
169 .mask = cpu_to_be32(0xffffffff),
170 },
171 {
172 /* arp dest ip */
173 .offset_type =
174 BCAST_FILTER_OFFSET_PAYLOAD_START,
175 .offset = sizeof(rfc1042_header) + 2 +
176 sizeof(struct arphdr) +
177 ETH_ALEN + sizeof(__be32) +
178 ETH_ALEN,
179 .mask = cpu_to_be32(0xffffffff),
180 /* mark it as special field */
181 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 },
183 },
184 },
185 {
186 /* dhcp offer bcast */
187 .discard = 0,
188 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 .attrs = {
190 {
191 /* udp dest port - 68 (bootp client)*/
192 .offset_type = BCAST_FILTER_OFFSET_IP_END,
193 .offset = offsetof(struct udphdr, dest),
194 .val = cpu_to_be32(0x00440000),
195 .mask = cpu_to_be32(0xffff0000),
196 },
197 {
198 /* dhcp - lsb bytes of client hw address */
199 .offset_type = BCAST_FILTER_OFFSET_IP_END,
200 .offset = 38,
201 .mask = cpu_to_be32(0xffffffff),
202 /* mark it as special field */
203 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 },
205 },
206 },
207 /* last filter must be empty */
208 {},
209 };
210 #endif
211
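/*
 * Take a D0i3 reference of the given type. While any reference is
 * held, the transport is kept out of D0i3; drop it again with
 * iwl_mvm_unref().
 */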
212 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213 {
214 if (!iwl_mvm_is_d0i3_supported(mvm))
215 return;
216
217 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
218 spin_lock_bh(&mvm->refs_lock);
219 mvm->refs[ref_type]++;
220 spin_unlock_bh(&mvm->refs_lock);
221 iwl_trans_ref(mvm->trans);
222 }
223
224 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225 {
226 if (!iwl_mvm_is_d0i3_supported(mvm))
227 return;
228
229 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
230 spin_lock_bh(&mvm->refs_lock);
231 WARN_ON(!mvm->refs[ref_type]--);
232 spin_unlock_bh(&mvm->refs_lock);
233 iwl_trans_unref(mvm->trans);
234 }
235
236 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 enum iwl_mvm_ref_type except_ref)
238 {
239 int i, j;
240
241 if (!iwl_mvm_is_d0i3_supported(mvm))
242 return;
243
244 spin_lock_bh(&mvm->refs_lock);
245 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 if (except_ref == i || !mvm->refs[i])
247 continue;
248
249 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 i, mvm->refs[i]);
251 for (j = 0; j < mvm->refs[i]; j++)
252 iwl_trans_unref(mvm->trans);
253 mvm->refs[i] = 0;
254 }
255 spin_unlock_bh(&mvm->refs_lock);
256 }
257
258 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259 {
260 int i;
261 bool taken = false;
262
263 if (!iwl_mvm_is_d0i3_supported(mvm))
264 return true;
265
266 spin_lock_bh(&mvm->refs_lock);
267 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 if (mvm->refs[i]) {
269 taken = true;
270 break;
271 }
272 }
273 spin_unlock_bh(&mvm->refs_lock);
274
275 return taken;
276 }
277
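/*
 * Take a reference and wait (up to a second) for a pending D0i3 exit
 * to complete, so the caller can safely perform direct target
 * accesses. Returns -EIO if the exit did not happen in time.
 */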
278 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
279 {
280 iwl_mvm_ref(mvm, ref_type);
281
282 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 HZ)) {
285 WARN_ON_ONCE(1);
286 iwl_mvm_unref(mvm, ref_type);
287 return -EIO;
288 }
289
290 return 0;
291 }
292
293 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294 {
295 int i;
296
297 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 for (i = 0; i < NUM_PHY_CTX; i++) {
299 mvm->phy_ctxts[i].id = i;
300 mvm->phy_ctxts[i].ref = 0;
301 }
302 }
303
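/*
 * Ask the firmware for the MCC/regulatory data of the given alpha2 and
 * build an ieee80211_regdomain from the response. *changed is set when
 * the firmware reports a new channel profile.
 */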
304 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
305 const char *alpha2,
306 enum iwl_mcc_source src_id,
307 bool *changed)
308 {
309 struct ieee80211_regdomain *regd = NULL;
310 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 struct iwl_mcc_update_resp *resp;
313
314 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315
316 lockdep_assert_held(&mvm->mutex);
317
318 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
319 if (IS_ERR_OR_NULL(resp)) {
320 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
321 PTR_ERR_OR_ZERO(resp));
322 goto out;
323 }
324
325 if (changed)
326 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
327
328 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
329 __le32_to_cpu(resp->n_channels),
330 resp->channels,
331 __le16_to_cpu(resp->mcc));
332 /* Store the returned source id */
333 src_id = resp->source_id;
334 kfree(resp);
335 if (IS_ERR_OR_NULL(regd)) {
336 IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
337 PTR_ERR_OR_ZERO(regd));
338 goto out;
339 }
340
341 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
342 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
343 mvm->lar_regdom_set = true;
344 mvm->mcc_src = src_id;
345
346 out:
347 return regd;
348 }
349
350 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351 {
352 bool changed;
353 struct ieee80211_regdomain *regd;
354
355 if (!iwl_mvm_is_lar_supported(mvm))
356 return;
357
358 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 if (!IS_ERR_OR_NULL(regd)) {
360 /* only update the regulatory core if changed */
361 if (changed)
362 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363
364 kfree(regd);
365 }
366 }
367
368 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 bool *changed)
370 {
371 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 MCC_SOURCE_GET_CURRENT :
374 MCC_SOURCE_OLD_FW, changed);
375 }
376
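/*
 * Replay the regulatory domain currently stored in the wiphy into the
 * firmware, using the last known MCC source, and update cfg80211 if
 * the channel profile changed.
 */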
377 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
378 {
379 enum iwl_mcc_source used_src;
380 struct ieee80211_regdomain *regd;
381 int ret;
382 bool changed;
383 const struct ieee80211_regdomain *r =
384 rtnl_dereference(mvm->hw->wiphy->regd);
385
386 if (!r)
387 return -ENOENT;
388
389 /* save the last source in case we overwrite it below */
390 used_src = mvm->mcc_src;
391 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
392 /* Notify the firmware we support wifi location updates */
393 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
394 if (!IS_ERR_OR_NULL(regd))
395 kfree(regd);
396 }
397
398 /* Now set our last stored MCC and source */
399 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
400 &changed);
401 if (IS_ERR_OR_NULL(regd))
402 return -EIO;
403
404 /* update cfg80211 if the regdomain was changed */
405 if (changed)
406 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
407 else
408 ret = 0;
409
410 kfree(regd);
411 return ret;
412 }
413
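/*
 * Advertise the device capabilities (ciphers, interface combinations,
 * scan limits, WoWLAN, etc.) to mac80211/cfg80211 and register the hw.
 */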
414 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
415 {
416 struct ieee80211_hw *hw = mvm->hw;
417 int num_mac, ret, i;
418 static const u32 mvm_ciphers[] = {
419 WLAN_CIPHER_SUITE_WEP40,
420 WLAN_CIPHER_SUITE_WEP104,
421 WLAN_CIPHER_SUITE_TKIP,
422 WLAN_CIPHER_SUITE_CCMP,
423 };
424
425 /* Tell mac80211 our characteristics */
426 ieee80211_hw_set(hw, SIGNAL_DBM);
427 ieee80211_hw_set(hw, SPECTRUM_MGMT);
428 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
429 ieee80211_hw_set(hw, QUEUE_CONTROL);
430 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
431 ieee80211_hw_set(hw, SUPPORTS_PS);
432 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
433 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
434 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
435 ieee80211_hw_set(hw, CONNECTION_MONITOR);
436 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
437 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
438 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
439
440 hw->queues = mvm->first_agg_queue;
441 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
442 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
443 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
444 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
445 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
446 hw->rate_control_algorithm = "iwl-mvm-rs";
447 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
448 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
449
450 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
451 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
452 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
453 hw->wiphy->cipher_suites = mvm->ciphers;
454
455 /*
456 * Enable 11w if advertised by firmware and software crypto
457 * is not enabled (as the firmware will interpret some mgmt
458 * packets, so enabling it with software crypto isn't safe)
459 */
460 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
461 !iwlwifi_mod_params.sw_crypto) {
462 ieee80211_hw_set(hw, MFP_CAPABLE);
463 mvm->ciphers[hw->wiphy->n_cipher_suites] =
464 WLAN_CIPHER_SUITE_AES_CMAC;
465 hw->wiphy->n_cipher_suites++;
466 }
467
468 /* currently FW API supports only one optional cipher scheme */
469 if (mvm->fw->cs[0].cipher) {
470 mvm->hw->n_cipher_schemes = 1;
471 mvm->hw->cipher_schemes = &mvm->fw->cs[0];
472 mvm->ciphers[hw->wiphy->n_cipher_suites] =
473 mvm->fw->cs[0].cipher;
474 hw->wiphy->n_cipher_suites++;
475 }
476
477 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
478 hw->wiphy->features |=
479 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
480 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
481 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
482
483 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
484 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
485 hw->chanctx_data_size = sizeof(u16);
486
487 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
488 BIT(NL80211_IFTYPE_P2P_CLIENT) |
489 BIT(NL80211_IFTYPE_AP) |
490 BIT(NL80211_IFTYPE_P2P_GO) |
491 BIT(NL80211_IFTYPE_P2P_DEVICE) |
492 BIT(NL80211_IFTYPE_ADHOC);
493
494 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
495 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
496 if (iwl_mvm_is_lar_supported(mvm))
497 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
498 else
499 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
500 REGULATORY_DISABLE_BEACON_HINTS;
501
502 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
503 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
504
505 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
506
507 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
508 hw->wiphy->n_iface_combinations =
509 ARRAY_SIZE(iwl_mvm_iface_combinations);
510
511 hw->wiphy->max_remain_on_channel_duration = 10000;
512 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
513 /* we can compensate an offset of up to 3 channels = 15 MHz */
514 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
515
516 /* Extract MAC address */
517 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
518 hw->wiphy->addresses = mvm->addresses;
519 hw->wiphy->n_addresses = 1;
520
521 /* Extract additional MAC addresses if available */
522 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
523 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
524
525 for (i = 1; i < num_mac; i++) {
526 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
527 ETH_ALEN);
528 mvm->addresses[i].addr[5]++;
529 hw->wiphy->n_addresses++;
530 }
531
532 iwl_mvm_reset_phy_ctxts(mvm);
533
534 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
535
536 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
537
538 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
539 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
540 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
541
542 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
543 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
544 else
545 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
546
547 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
548 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
549 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
550 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
551 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
552 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
553
554 if (fw_has_capa(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
556 fw_has_api(&mvm->fw->ucode_capa,
557 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
558 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
559 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
560 }
561
562 hw->wiphy->hw_version = mvm->trans->hw_id;
563
564 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
565 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
566 else
567 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
568
569 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
570 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
571 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
572 /* we create the 802.11 header and a zero-length SSID IE. */
573 hw->wiphy->max_sched_scan_ie_len =
574 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
575
576 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
577 NL80211_FEATURE_LOW_PRIORITY_SCAN |
578 NL80211_FEATURE_P2P_GO_OPPPS |
579 NL80211_FEATURE_DYNAMIC_SMPS |
580 NL80211_FEATURE_STATIC_SMPS |
581 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
582
583 if (fw_has_capa(&mvm->fw->ucode_capa,
584 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
585 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
586 if (fw_has_capa(&mvm->fw->ucode_capa,
587 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
588 hw->wiphy->features |= NL80211_FEATURE_QUIET;
589
590 if (fw_has_capa(&mvm->fw->ucode_capa,
591 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
592 hw->wiphy->features |=
593 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
594
595 if (fw_has_capa(&mvm->fw->ucode_capa,
596 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
597 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
598
599 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
600
601 #ifdef CONFIG_PM_SLEEP
602 if (iwl_mvm_is_d0i3_supported(mvm) &&
603 device_can_wakeup(mvm->trans->dev)) {
604 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
605 hw->wiphy->wowlan = &mvm->wowlan;
606 }
607
608 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
609 mvm->trans->ops->d3_suspend &&
610 mvm->trans->ops->d3_resume &&
611 device_can_wakeup(mvm->trans->dev)) {
612 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
613 WIPHY_WOWLAN_DISCONNECT |
614 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
615 WIPHY_WOWLAN_RFKILL_RELEASE |
616 WIPHY_WOWLAN_NET_DETECT;
617 if (!iwlwifi_mod_params.sw_crypto)
618 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
619 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
620 WIPHY_WOWLAN_4WAY_HANDSHAKE;
621
622 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
623 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
624 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
625 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
626 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
627 hw->wiphy->wowlan = &mvm->wowlan;
628 }
629 #endif
630
631 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
632 /* assign default bcast filtering configuration */
633 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
634 #endif
635
636 ret = iwl_mvm_leds_init(mvm);
637 if (ret)
638 return ret;
639
640 if (fw_has_capa(&mvm->fw->ucode_capa,
641 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
642 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
643 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
644 }
645
646 if (fw_has_capa(&mvm->fw->ucode_capa,
647 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
648 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
649 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
650 }
651
652 ret = ieee80211_register_hw(mvm->hw);
653 if (ret)
654 iwl_mvm_leds_exit(mvm);
655
656 return ret;
657 }
658
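/*
 * While still in D0i3, queue the frame on d0i3_tx, stop the mac80211
 * queues and trigger a wakeup; the deferred frames are transmitted
 * once D0i3 exit completes. Returns true if the frame was deferred.
 */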
659 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
660 struct ieee80211_sta *sta,
661 struct sk_buff *skb)
662 {
663 struct iwl_mvm_sta *mvmsta;
664 bool defer = false;
665
666 /*
667 * check the IN_D0I3 flag both before and after taking the
668 * spinlock; the unlocked check avoids taking the spinlock
669 * when it isn't needed.
670 */
671 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
672 return false;
673
674 spin_lock(&mvm->d0i3_tx_lock);
675 /*
676 * testing the flag again ensures the skb dequeue
677 * loop (on d0i3 exit) hasn't run yet.
678 */
679 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
680 goto out;
681
682 mvmsta = iwl_mvm_sta_from_mac80211(sta);
683 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
684 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
685 goto out;
686
687 __skb_queue_tail(&mvm->d0i3_tx, skb);
688 ieee80211_stop_queues(mvm->hw);
689
690 /* trigger wakeup */
691 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
692 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
693
694 defer = true;
695 out:
696 spin_unlock(&mvm->d0i3_tx_lock);
697 return defer;
698 }
699
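/* mac80211 .tx callback: drops frames in RF-kill, defers frames during
 * D0i3 exit and otherwise hands them to the Tx path.
 */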
700 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
701 struct ieee80211_tx_control *control,
702 struct sk_buff *skb)
703 {
704 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
705 struct ieee80211_sta *sta = control->sta;
706 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
707 struct ieee80211_hdr *hdr = (void *)skb->data;
708
709 if (iwl_mvm_is_radio_killed(mvm)) {
710 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
711 goto drop;
712 }
713
714 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
715 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
716 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
717 goto drop;
718
719 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
720 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
721 ieee80211_is_mgmt(hdr->frame_control) &&
722 !ieee80211_is_deauth(hdr->frame_control) &&
723 !ieee80211_is_disassoc(hdr->frame_control) &&
724 !ieee80211_is_action(hdr->frame_control)))
725 sta = NULL;
726
727 if (sta) {
728 if (iwl_mvm_defer_tx(mvm, sta, skb))
729 return;
730 if (iwl_mvm_tx_skb(mvm, skb, sta))
731 goto drop;
732 return;
733 }
734
735 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
736 goto drop;
737 return;
738 drop:
739 ieee80211_free_txskb(hw, skb);
740 }
741
742 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
743 {
744 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
745 return false;
746 return true;
747 }
748
749 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
750 {
751 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
752 return false;
753 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
754 return true;
755
756 /* enabled by default */
757 return true;
758 }
759
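/*
 * Collect a firmware debug dump through the BA trigger if the given TID
 * is enabled in the trigger's TID bitmap.
 */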
760 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
761 do { \
762 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
763 break; \
764 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
765 } while (0)
766
767 static void
768 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
769 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
770 enum ieee80211_ampdu_mlme_action action)
771 {
772 struct iwl_fw_dbg_trigger_tlv *trig;
773 struct iwl_fw_dbg_trigger_ba *ba_trig;
774
775 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
776 return;
777
778 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
779 ba_trig = (void *)trig->data;
780
781 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
782 return;
783
784 switch (action) {
785 case IEEE80211_AMPDU_TX_OPERATIONAL: {
786 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
787 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
788
789 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
790 "TX AGG START: MAC %pM tid %d ssn %d\n",
791 sta->addr, tid, tid_data->ssn);
792 break;
793 }
794 case IEEE80211_AMPDU_TX_STOP_CONT:
795 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
796 "TX AGG STOP: MAC %pM tid %d\n",
797 sta->addr, tid);
798 break;
799 case IEEE80211_AMPDU_RX_START:
800 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
801 "RX AGG START: MAC %pM tid %d ssn %d\n",
802 sta->addr, tid, rx_ba_ssn);
803 break;
804 case IEEE80211_AMPDU_RX_STOP:
805 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
806 "RX AGG STOP: MAC %pM tid %d\n",
807 sta->addr, tid);
808 break;
809 default:
810 break;
811 }
812 }
813
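/*
 * mac80211 .ampdu_action callback: starts/stops RX and TX aggregation
 * sessions, taking a D0i3 reference around the TX actions that need
 * direct target access.
 */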
814 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
815 struct ieee80211_vif *vif,
816 enum ieee80211_ampdu_mlme_action action,
817 struct ieee80211_sta *sta, u16 tid,
818 u16 *ssn, u8 buf_size)
819 {
820 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
821 int ret;
822 bool tx_agg_ref = false;
823
824 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
825 sta->addr, tid, action);
826
827 if (!(mvm->nvm_data->sku_cap_11n_enable))
828 return -EACCES;
829
830 /* return from D0i3 before starting a new Tx aggregation */
831 switch (action) {
832 case IEEE80211_AMPDU_TX_START:
833 case IEEE80211_AMPDU_TX_STOP_CONT:
834 case IEEE80211_AMPDU_TX_STOP_FLUSH:
835 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
836 case IEEE80211_AMPDU_TX_OPERATIONAL:
837 /*
838 * for tx start, wait synchronously until D0i3 exit to
839 * get the correct sequence number for the tid.
840 * additionally, some other ampdu actions use direct
841 * target access, which is not handled automatically
842 * by the trans layer (unlike commands), so wait for
843 * d0i3 exit in these cases as well.
844 */
845 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
846 if (ret)
847 return ret;
848
849 tx_agg_ref = true;
850 break;
851 default:
852 break;
853 }
854
855 mutex_lock(&mvm->mutex);
856
857 switch (action) {
858 case IEEE80211_AMPDU_RX_START:
859 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
860 ret = -EINVAL;
861 break;
862 }
863 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
864 break;
865 case IEEE80211_AMPDU_RX_STOP:
866 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
867 break;
868 case IEEE80211_AMPDU_TX_START:
869 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
870 ret = -EINVAL;
871 break;
872 }
873 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
874 break;
875 case IEEE80211_AMPDU_TX_STOP_CONT:
876 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
877 break;
878 case IEEE80211_AMPDU_TX_STOP_FLUSH:
879 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
880 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
881 break;
882 case IEEE80211_AMPDU_TX_OPERATIONAL:
883 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
884 break;
885 default:
886 WARN_ON_ONCE(1);
887 ret = -EINVAL;
888 break;
889 }
890
891 if (!ret) {
892 u16 rx_ba_ssn = 0;
893
894 if (action == IEEE80211_AMPDU_RX_START)
895 rx_ba_ssn = *ssn;
896
897 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
898 rx_ba_ssn, action);
899 }
900 mutex_unlock(&mvm->mutex);
901
902 /*
903 * If the tid is marked as started, we won't use it for offloaded
904 * traffic on the next D0i3 entry. It's safe to unref.
905 */
906 if (tx_agg_ref)
907 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
908
909 return ret;
910 }
911
912 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
913 struct ieee80211_vif *vif)
914 {
915 struct iwl_mvm *mvm = data;
916 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
917
918 mvmvif->uploaded = false;
919 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
920
921 spin_lock_bh(&mvm->time_event_lock);
922 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
923 spin_unlock_bh(&mvm->time_event_lock);
924
925 mvmvif->phy_ctxt = NULL;
926 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
927 }
928
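/*
 * devcoredump read callback: return the op_mode dump first, followed by
 * the transport dump, starting at the requested offset.
 */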
929 static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
930 const void *data, size_t datalen)
931 {
932 const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
933 ssize_t bytes_read;
934 ssize_t bytes_read_trans;
935
936 if (offset < dump_ptrs->op_mode_len) {
937 bytes_read = min_t(ssize_t, count,
938 dump_ptrs->op_mode_len - offset);
939 memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
940 bytes_read);
941 offset += bytes_read;
942 count -= bytes_read;
943
944 if (count == 0)
945 return bytes_read;
946 } else {
947 bytes_read = 0;
948 }
949
950 if (!dump_ptrs->trans_ptr)
951 return bytes_read;
952
953 offset -= dump_ptrs->op_mode_len;
954 bytes_read_trans = min_t(ssize_t, count,
955 dump_ptrs->trans_ptr->len - offset);
956 memcpy(buffer + bytes_read,
957 (u8 *)dump_ptrs->trans_ptr->data + offset,
958 bytes_read_trans);
959
960 return bytes_read + bytes_read_trans;
961 }
962
963 static void iwl_mvm_free_coredump(const void *data)
964 {
965 const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
966
967 vfree(fw_error_dump->op_mode_ptr);
968 vfree(fw_error_dump->trans_ptr);
969 kfree(fw_error_dump);
970 }
971
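/*
 * Read the contents of all RX and TX FIFOs into the error dump, one TLV
 * per FIFO. Requires (and grabs) NIC access.
 */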
972 static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
973 struct iwl_fw_error_dump_data **dump_data)
974 {
975 struct iwl_fw_error_dump_fifo *fifo_hdr;
976 u32 *fifo_data;
977 u32 fifo_len;
978 unsigned long flags;
979 int i, j;
980
981 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
982 return;
983
984 /* Pull RXF data from all RXFs */
985 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
986 /*
987 * The registers of each subsequent RXF sit at an
988 * additional fixed offset
989 */
990 u32 offset_diff = RXF_DIFF_FROM_PREV * i;
991
992 fifo_hdr = (void *)(*dump_data)->data;
993 fifo_data = (void *)fifo_hdr->data;
994 fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
995
996 /* No need to try to read the data if the length is 0 */
997 if (fifo_len == 0)
998 continue;
999
1000 /* Add a TLV for the RXF */
1001 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1002 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1003
1004 fifo_hdr->fifo_num = cpu_to_le32(i);
1005 fifo_hdr->available_bytes =
1006 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1007 RXF_RD_D_SPACE +
1008 offset_diff));
1009 fifo_hdr->wr_ptr =
1010 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1011 RXF_RD_WR_PTR +
1012 offset_diff));
1013 fifo_hdr->rd_ptr =
1014 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1015 RXF_RD_RD_PTR +
1016 offset_diff));
1017 fifo_hdr->fence_ptr =
1018 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1019 RXF_RD_FENCE_PTR +
1020 offset_diff));
1021 fifo_hdr->fence_mode =
1022 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1023 RXF_SET_FENCE_MODE +
1024 offset_diff));
1025
1026 /* Lock fence */
1027 iwl_trans_write_prph(mvm->trans,
1028 RXF_SET_FENCE_MODE + offset_diff, 0x1);
1030 /* Set the fence pointer to the same place as the WR pointer */
1030 iwl_trans_write_prph(mvm->trans,
1031 RXF_LD_WR2FENCE + offset_diff, 0x1);
1032 /* Set fence offset */
1033 iwl_trans_write_prph(mvm->trans,
1034 RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1035 0x0);
1036
1037 /* Read FIFO */
1038 fifo_len /= sizeof(u32); /* Size in DWORDS */
1039 for (j = 0; j < fifo_len; j++)
1040 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1041 RXF_FIFO_RD_FENCE_INC +
1042 offset_diff);
1043 *dump_data = iwl_fw_error_next_data(*dump_data);
1044 }
1045
1046 /* Pull TXF data from all TXFs */
1047 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1048 /* Select the TXF we're pulling now */
1049 iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1050
1051 fifo_hdr = (void *)(*dump_data)->data;
1052 fifo_data = (void *)fifo_hdr->data;
1053 fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1054
1055 /* No need to try to read the data if the length is 0 */
1056 if (fifo_len == 0)
1057 continue;
1058
1059 /* Add a TLV for the FIFO */
1060 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1061 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1062
1063 fifo_hdr->fifo_num = cpu_to_le32(i);
1064 fifo_hdr->available_bytes =
1065 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1066 TXF_FIFO_ITEM_CNT));
1067 fifo_hdr->wr_ptr =
1068 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1069 TXF_WR_PTR));
1070 fifo_hdr->rd_ptr =
1071 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1072 TXF_RD_PTR));
1073 fifo_hdr->fence_ptr =
1074 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1075 TXF_FENCE_PTR));
1076 fifo_hdr->fence_mode =
1077 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1078 TXF_LOCK_FENCE));
1079
1080 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1081 iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1082 TXF_WR_PTR);
1083
1084 /* Dummy-read to advance the read pointer to the head */
1085 iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1086
1087 /* Read FIFO */
1088 fifo_len /= sizeof(u32); /* Size in DWORDS */
1089 for (j = 0; j < fifo_len; j++)
1090 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1091 TXF_READ_MODIFY_DATA);
1092 *dump_data = iwl_fw_error_next_data(*dump_data);
1093 }
1094
1095 iwl_trans_release_nic_access(mvm->trans, &flags);
1096 }
1097
1098 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1099 {
1100 if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1101 !mvm->fw_dump_desc)
1102 return;
1103
1104 kfree(mvm->fw_dump_desc);
1105 mvm->fw_dump_desc = NULL;
1106 }
1107
1108 #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
1109 #define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
1110
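/*
 * Build the firmware error dump (device info, FIFOs, SRAM/SMEM/ICCM and
 * the transport dump) and hand it to devcoredump.
 */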
1111 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
1112 {
1113 struct iwl_fw_error_dump_file *dump_file;
1114 struct iwl_fw_error_dump_data *dump_data;
1115 struct iwl_fw_error_dump_info *dump_info;
1116 struct iwl_fw_error_dump_mem *dump_mem;
1117 struct iwl_fw_error_dump_trigger_desc *dump_trig;
1118 struct iwl_mvm_dump_ptrs *fw_error_dump;
1119 u32 sram_len, sram_ofs;
1120 u32 file_len, fifo_data_len = 0;
1121 u32 smem_len = mvm->cfg->smem_len;
1122 u32 sram2_len = mvm->cfg->dccm2_len;
1123
1124 lockdep_assert_held(&mvm->mutex);
1125
1126 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1127 if (!fw_error_dump)
1128 return;
1129
1130 /* SRAM - include stack CCM if driver knows the values for it */
1131 if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1132 const struct fw_img *img;
1133
1134 img = &mvm->fw->img[mvm->cur_ucode];
1135 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1136 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1137 } else {
1138 sram_ofs = mvm->cfg->dccm_offset;
1139 sram_len = mvm->cfg->dccm_len;
1140 }
1141
1142 /* reading RXF/TXF sizes */
1143 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1144 struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1145 int i;
1146
1147 fifo_data_len = 0;
1148
1149 /* Count RXF size */
1150 for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1151 if (!mem_cfg->rxfifo_size[i])
1152 continue;
1153
1154 /* Add header info */
1155 fifo_data_len += mem_cfg->rxfifo_size[i] +
1156 sizeof(*dump_data) +
1157 sizeof(struct iwl_fw_error_dump_fifo);
1158 }
1159
1160 for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1161 if (!mem_cfg->txfifo_size[i])
1162 continue;
1163
1164 /* Add header info */
1165 fifo_data_len += mem_cfg->txfifo_size[i] +
1166 sizeof(*dump_data) +
1167 sizeof(struct iwl_fw_error_dump_fifo);
1168 }
1169 }
1170
1171 file_len = sizeof(*dump_file) +
1172 sizeof(*dump_data) * 2 +
1173 sram_len + sizeof(*dump_mem) +
1174 fifo_data_len +
1175 sizeof(*dump_info);
1176
1177 /*
1178 * For the 8000 HW family B-step, also include the ICCM (which resides separately)
1179 */
1180 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1181 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1182 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1183 IWL8260_ICCM_LEN;
1184
1185 if (mvm->fw_dump_desc)
1186 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1187 mvm->fw_dump_desc->len;
1188
1189 /* Make room for the SMEM, if it exists */
1190 if (smem_len)
1191 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1192
1193 /* Make room for the secondary SRAM, if it exists */
1194 if (sram2_len)
1195 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1196
1197 dump_file = vzalloc(file_len);
1198 if (!dump_file) {
1199 kfree(fw_error_dump);
1200 iwl_mvm_free_fw_dump_desc(mvm);
1201 return;
1202 }
1203
1204 fw_error_dump->op_mode_ptr = dump_file;
1205
1206 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1207 dump_data = (void *)dump_file->data;
1208
1209 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1210 dump_data->len = cpu_to_le32(sizeof(*dump_info));
1211 dump_info = (void *) dump_data->data;
1212 dump_info->device_family =
1213 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1214 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1215 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
1216 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
1217 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1218 sizeof(dump_info->fw_human_readable));
1219 strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1220 sizeof(dump_info->dev_human_readable));
1221 strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1222 sizeof(dump_info->bus_human_readable));
1223
1224 dump_data = iwl_fw_error_next_data(dump_data);
1225 /* We only dump the FIFOs if the FW is in error state */
1226 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1227 iwl_mvm_dump_fifos(mvm, &dump_data);
1228
1229 if (mvm->fw_dump_desc) {
1230 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1231 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1232 mvm->fw_dump_desc->len);
1233 dump_trig = (void *)dump_data->data;
1234 memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1235 sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1236
1237 /* now we can free this copy */
1238 iwl_mvm_free_fw_dump_desc(mvm);
1239 dump_data = iwl_fw_error_next_data(dump_data);
1240 }
1241
1242 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1243 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1244 dump_mem = (void *)dump_data->data;
1245 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1246 dump_mem->offset = cpu_to_le32(sram_ofs);
1247 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1248 sram_len);
1249
1250 if (smem_len) {
1251 dump_data = iwl_fw_error_next_data(dump_data);
1252 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1253 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1254 dump_mem = (void *)dump_data->data;
1255 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1256 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
1257 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1258 dump_mem->data, smem_len);
1259 }
1260
1261 if (sram2_len) {
1262 dump_data = iwl_fw_error_next_data(dump_data);
1263 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1264 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1265 dump_mem = (void *)dump_data->data;
1266 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1267 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1268 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1269 dump_mem->data, sram2_len);
1270 }
1271
1272 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1273 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1274 dump_data = iwl_fw_error_next_data(dump_data);
1275 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1276 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1277 sizeof(*dump_mem));
1278 dump_mem = (void *)dump_data->data;
1279 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1280 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1281 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1282 dump_mem->data, IWL8260_ICCM_LEN);
1283 }
1284
1285 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
1286 fw_error_dump->op_mode_len = file_len;
1287 if (fw_error_dump->trans_ptr)
1288 file_len += fw_error_dump->trans_ptr->len;
1289 dump_file->file_len = cpu_to_le32(file_len);
1290
1291 dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1292 GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
1293
1294 clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
1295 }
1296
1297 struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1298 .trig_desc = {
1299 .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1300 },
1301 };
1302
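/* Reset driver and mac80211-related state before reconfiguring after a
 * firmware error or hw restart.
 */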
1303 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1304 {
1305 /* Clear the D3 reconfig flag; we only need it to avoid dumping a
1306 * firmware coredump on reconfiguration, and we shouldn't do that
1307 * on the D3->D0 transition
1308 */
1309 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1310 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1311 iwl_mvm_fw_error_dump(mvm);
1312 }
1313
1314 /* cleanup all stale references (scan, roc), but keep the
1315 * ucode_down ref until reconfig is complete
1316 */
1317 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1318
1319 iwl_trans_stop_device(mvm->trans);
1320
1321 mvm->scan_status = 0;
1322 mvm->ps_disabled = false;
1323 mvm->calibrating = false;
1324
1325 /* just in case one was running */
1326 ieee80211_remain_on_channel_expired(mvm->hw);
1327
1328 /*
1329 * cleanup all interfaces, even inactive ones, as some might have
1330 * gone down during the HW restart
1331 */
1332 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1333
1334 mvm->p2p_device_vif = NULL;
1335 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1336
1337 iwl_mvm_reset_phy_ctxts(mvm);
1338 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1339 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1340 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1341 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1342 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1343 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1344 memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1345 memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1346
1347 ieee80211_wake_queues(mvm->hw);
1348
1349 /* clear any stale d0i3 state */
1350 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1351
1352 mvm->vif_count = 0;
1353 mvm->rx_ba_sessions = 0;
1354 mvm->fw_dbg_conf = FW_DBG_INVALID;
1355
1356 /* keep statistics ticking */
1357 iwl_mvm_accu_radio_stats(mvm);
1358 }
1359
1360 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1361 {
1362 int ret;
1363
1364 lockdep_assert_held(&mvm->mutex);
1365
1366 /* Clean up some internal and mac80211 state on restart */
1367 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1368 iwl_mvm_restart_cleanup(mvm);
1369
1370 ret = iwl_mvm_up(mvm);
1371
1372 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1373 /* Something went wrong - we need to finish some cleanup
1374 * that normally iwl_mvm_mac_restart_complete() below
1375 * would do.
1376 */
1377 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1378 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1379 }
1380
1381 return ret;
1382 }
1383
1384 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1385 {
1386 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1387 int ret;
1388
1389 /* Some hw restart cleanups must not hold the mutex */
1390 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1391 /*
1392 * Make sure we are out of d0i3. This is needed
1393 * to make sure the reference accounting is correct
1394 * (and there is no stale d0i3_exit_work).
1395 */
1396 wait_event_timeout(mvm->d0i3_exit_waitq,
1397 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1398 &mvm->status),
1399 HZ);
1400 }
1401
1402 mutex_lock(&mvm->mutex);
1403 ret = __iwl_mvm_mac_start(mvm);
1404 mutex_unlock(&mvm->mutex);
1405
1406 return ret;
1407 }
1408
1409 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1410 {
1411 int ret;
1412
1413 mutex_lock(&mvm->mutex);
1414
1415 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1416 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1417 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1418 if (ret)
1419 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1420 ret);
1421
1422 /* allow transport/FW low power modes */
1423 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1424
1425 /*
1426 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1427 * of packets the FW sent out, so we must reconnect.
1428 */
1429 iwl_mvm_teardown_tdls_peers(mvm);
1430
1431 mutex_unlock(&mvm->mutex);
1432 }
1433
1434 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1435 {
1436 bool exit_now;
1437
1438 if (!iwl_mvm_is_d0i3_supported(mvm))
1439 return;
1440
1441 mutex_lock(&mvm->d0i3_suspend_mutex);
1442 __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1443 exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
1444 &mvm->d0i3_suspend_flags);
1445 mutex_unlock(&mvm->d0i3_suspend_mutex);
1446
1447 if (exit_now) {
1448 IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
1449 _iwl_mvm_exit_d0i3(mvm);
1450 }
1451
1452 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1453 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1454 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1455 &mvm->status),
1456 HZ))
1457 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1458 }
1459
1460 static void
1461 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1462 enum ieee80211_reconfig_type reconfig_type)
1463 {
1464 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1465
1466 switch (reconfig_type) {
1467 case IEEE80211_RECONFIG_TYPE_RESTART:
1468 iwl_mvm_restart_complete(mvm);
1469 break;
1470 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1471 iwl_mvm_resume_complete(mvm);
1472 break;
1473 }
1474 }
1475
1476 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1477 {
1478 lockdep_assert_held(&mvm->mutex);
1479
1480 /* the firmware counters are reset now; we don't want to track them
1481 * only partially, so also clear the accumulated counters.
1482 */
1483 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1484
1485 /*
1486 * Disallow low power states when the FW is down by taking
1487 * the UCODE_DOWN ref. In case of an ongoing hw restart the
1488 * ref is already taken, so don't take it again.
1489 */
1490 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1491 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1492
1493 /* async_handlers_wk is now blocked */
1494
1495 /*
1496 * The work item could be running or queued if the
1497 * ROC time event stops just as we get here.
1498 */
1499 flush_work(&mvm->roc_done_wk);
1500
1501 iwl_trans_stop_device(mvm->trans);
1502
1503 iwl_mvm_async_handlers_purge(mvm);
1504 /* async_handlers_list is empty and will stay empty: HW is stopped */
1505
1506 /* the fw is stopped, the aux sta is dead: clean up driver state */
1507 iwl_mvm_del_aux_sta(mvm);
1508
1509 /*
1510 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1511 * won't be called in this case).
1512 * But make sure to cleanup interfaces that have gone down before/during
1513 * HW restart was requested.
1514 */
1515 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1516 ieee80211_iterate_interfaces(mvm->hw, 0,
1517 iwl_mvm_cleanup_iterator, mvm);
1518
1519 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1520 * make sure there's nothing left there and warn if any is found.
1521 */
1522 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1523 int i;
1524
1525 for (i = 0; i < mvm->max_scans; i++) {
1526 if (WARN_ONCE(mvm->scan_uid_status[i],
1527 "UMAC scan UID %d status was not cleaned\n",
1528 i))
1529 mvm->scan_uid_status[i] = 0;
1530 }
1531 }
1532
1533 mvm->ucode_loaded = false;
1534 }
1535
1536 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1537 {
1538 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1539
1540 flush_work(&mvm->d0i3_exit_work);
1541 flush_work(&mvm->async_handlers_wk);
1542 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1543 iwl_mvm_free_fw_dump_desc(mvm);
1544
1545 mutex_lock(&mvm->mutex);
1546 __iwl_mvm_mac_stop(mvm);
1547 mutex_unlock(&mvm->mutex);
1548
1549 /*
1550 * The worker might have been waiting for the mutex; let it run and
1551 * discover that its list is now empty.
1552 */
1553 cancel_work_sync(&mvm->async_handlers_wk);
1554 }
1555
1556 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1557 {
1558 u16 i;
1559
1560 lockdep_assert_held(&mvm->mutex);
1561
1562 for (i = 0; i < NUM_PHY_CTX; i++)
1563 if (!mvm->phy_ctxts[i].ref)
1564 return &mvm->phy_ctxts[i];
1565
1566 IWL_ERR(mvm, "No available PHY context\n");
1567 return NULL;
1568 }
1569
1570 static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
1571 struct ieee80211_vif *vif, s8 tx_power)
1572 {
1573 /* FW is in charge of regulatory enforcement */
1574 struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
1575 .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
1576 .pwr_restriction = cpu_to_le16(tx_power),
1577 };
1578
1579 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
1580 sizeof(reduce_txpwr_cmd),
1581 &reduce_txpwr_cmd);
1582 }
1583
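/*
 * Send the per-MAC tx power restriction to the firmware. Judging by the
 * 8 * tx_power scaling below, the firmware appears to take the limit in
 * units of 1/8 dBm; older firmware falls back to the old command.
 */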
1584 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1585 s16 tx_power)
1586 {
1587 struct iwl_dev_tx_power_cmd cmd = {
1588 .set_mode = 0,
1589 .mac_context_id =
1590 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1591 .pwr_restriction = cpu_to_le16(8 * tx_power),
1592 };
1593
1594 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
1595 return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
1596
1597 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1598 cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1599
1600 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
1601 sizeof(cmd), &cmd);
1602 }
1603
1604 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1605 struct ieee80211_vif *vif)
1606 {
1607 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1608 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1609 int ret;
1610
1611 mvmvif->mvm = mvm;
1612
1613 /*
1614 * make sure D0i3 exit is completed, otherwise a target access
1615 * during tx queue configuration could be done when still in
1616 * D0i3 state.
1617 */
1618 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1619 if (ret)
1620 return ret;
1621
1622 /*
1623 * Not much to do here. The stack will not allow interface
1624 * types or combinations that we didn't advertise, so we
1625 * don't really have to check the types.
1626 */
1627
1628 mutex_lock(&mvm->mutex);
1629
1630 /* make sure that beacon statistics don't go backwards with FW reset */
1631 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1632 mvmvif->beacon_stats.accu_num_beacons +=
1633 mvmvif->beacon_stats.num_beacons;
1634
1635 /* Allocate resources for the MAC context, and add it to the fw */
1636 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1637 if (ret)
1638 goto out_unlock;
1639
1640 /* Counting number of interfaces is needed for legacy PM */
1641 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1642 mvm->vif_count++;
1643
1644 /*
1645 * The AP binding flow can be done only after the beacon
1646 * template is configured (which happens only in the mac80211
1647 * start_ap() flow), and adding the broadcast station can happen
1648 * only after the binding.
1649 * In addition, since modifying the MAC before adding a bcast
1650 * station is not allowed by the FW, delay the adding of MAC context to
1651 * the point where we can also add the bcast station.
1652 * In short: there's not much we can do at this point, other than
1653 * allocating resources :)
1654 */
1655 if (vif->type == NL80211_IFTYPE_AP ||
1656 vif->type == NL80211_IFTYPE_ADHOC) {
1657 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1658 if (ret) {
1659 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1660 goto out_release;
1661 }
1662
1663 iwl_mvm_vif_dbgfs_register(mvm, vif);
1664 goto out_unlock;
1665 }
1666
1667 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1668 if (ret)
1669 goto out_release;
1670
1671 ret = iwl_mvm_power_update_mac(mvm);
1672 if (ret)
1673 goto out_remove_mac;
1674
1675 /* beacon filtering */
1676 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1677 if (ret)
1678 goto out_remove_mac;
1679
1680 if (!mvm->bf_allowed_vif &&
1681 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1682 mvm->bf_allowed_vif = mvmvif;
1683 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1684 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1685 }
1686
1687 /*
1688 * P2P_DEVICE interface does not have a channel context assigned to it,
1689 * so a dedicated PHY context is allocated to it and the corresponding
1690 * MAC context is bound to it at this stage.
1691 */
1692 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1693
1694 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1695 if (!mvmvif->phy_ctxt) {
1696 ret = -ENOSPC;
1697 goto out_free_bf;
1698 }
1699
1700 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1701 ret = iwl_mvm_binding_add_vif(mvm, vif);
1702 if (ret)
1703 goto out_unref_phy;
1704
1705 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1706 if (ret)
1707 goto out_unbind;
1708
1709 /* Save a pointer to p2p device vif, so it can later be used to
1710 * update the p2p device MAC when a GO is started/stopped */
1711 mvm->p2p_device_vif = vif;
1712 }
1713
1714 iwl_mvm_vif_dbgfs_register(mvm, vif);
1715 goto out_unlock;
1716
1717 out_unbind:
1718 iwl_mvm_binding_remove_vif(mvm, vif);
1719 out_unref_phy:
1720 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1721 out_free_bf:
1722 if (mvm->bf_allowed_vif == mvmvif) {
1723 mvm->bf_allowed_vif = NULL;
1724 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1725 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1726 }
1727 out_remove_mac:
1728 mvmvif->phy_ctxt = NULL;
1729 iwl_mvm_mac_ctxt_remove(mvm, vif);
1730 out_release:
1731 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1732 mvm->vif_count--;
1733
1734 iwl_mvm_mac_ctxt_release(mvm, vif);
1735 out_unlock:
1736 mutex_unlock(&mvm->mutex);
1737
1738 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1739
1740 return ret;
1741 }
1742
1743 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1744 struct ieee80211_vif *vif)
1745 {
1746 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1747
1748 if (tfd_msk) {
1749 /*
1750 * mac80211 first removes all the stations of the vif and
1751 * then removes the vif. When it removes a station it also
1752 * flushes the AMPDU session. So by now, all the AMPDU sessions
1753 * of all the stations of this vif are closed, and the queues
1754 * of these AMPDU sessions are properly closed.
1755 * We still need to take care of the shared queues of the vif.
1756 * Flush them here.
1757 */
1758 mutex_lock(&mvm->mutex);
1759 iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
1760 mutex_unlock(&mvm->mutex);
1761
1762 /*
1763 * There are transports that buffer a few frames in the host.
1764 * For these, the flush above isn't enough since while we were
1765 * flushing, the transport might have sent more frames to the
1766 * device. To solve this, wait here until the transport is
1767 * empty. Technically, this could have replaced the flush
1768 * above, but flush is much faster than draining. So flush
1769 * first, and drain to make sure we have no frames in the
1770 * transport anymore.
1771 * If a station still had frames on the shared queues, it is
1772 * already marked as draining, so to complete the draining, we
1773 * just need to wait until the transport is empty.
1774 */
1775 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1776 }
1777
1778 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1779 /*
1780 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1781 * We assume here that all the packets sent to the OFFCHANNEL
1782 * queue are sent in ROC session.
1783 */
1784 flush_work(&mvm->roc_done_wk);
1785 } else {
1786 /*
1787 * By now, all the AC queues are empty. The AGG queues are
1788 * empty too. We already got all the Tx responses for all the
1789 * packets in the queues. The drain work may have been
1790 * triggered; flush it.
1791 */
1792 flush_work(&mvm->sta_drained_wk);
1793 }
1794 }
1795
1796 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1797 struct ieee80211_vif *vif)
1798 {
1799 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1800 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1801
1802 iwl_mvm_prepare_mac_removal(mvm, vif);
1803
1804 mutex_lock(&mvm->mutex);
1805
1806 if (mvm->bf_allowed_vif == mvmvif) {
1807 mvm->bf_allowed_vif = NULL;
1808 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1809 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1810 }
1811
1812 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1813
1814 /*
1815 * For AP/GO interfaces, the teardown of the resources allocated to the
1816 * interface is handled as part of the stop_ap flow.
1817 */
1818 if (vif->type == NL80211_IFTYPE_AP ||
1819 vif->type == NL80211_IFTYPE_ADHOC) {
1820 #ifdef CONFIG_NL80211_TESTMODE
1821 if (vif == mvm->noa_vif) {
1822 mvm->noa_vif = NULL;
1823 mvm->noa_duration = 0;
1824 }
1825 #endif
1826 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1827 goto out_release;
1828 }
1829
1830 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1831 mvm->p2p_device_vif = NULL;
1832 iwl_mvm_rm_bcast_sta(mvm, vif);
1833 iwl_mvm_binding_remove_vif(mvm, vif);
1834 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1835 mvmvif->phy_ctxt = NULL;
1836 }
1837
1838 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1839 mvm->vif_count--;
1840
1841 iwl_mvm_power_update_mac(mvm);
1842 iwl_mvm_mac_ctxt_remove(mvm, vif);
1843
1844 out_release:
1845 iwl_mvm_mac_ctxt_release(mvm, vif);
1846 mutex_unlock(&mvm->mutex);
1847 }
1848
1849 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1850 {
1851 return 0;
1852 }
1853
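/*
 * Multicast filtering flow: iwl_mvm_prepare_multicast() builds an
 * MCAST_FILTER_CMD in atomic context and hands it back to mac80211 as an
 * opaque u64 cookie; iwl_mvm_configure_filter() later stores it as
 * mvm->mcast_filter_cmd, and iwl_mvm_recalc_multicast() sends one copy per
 * associated station vif, each with its own port_id. The cookie is simply
 * the command pointer, roughly:
 *
 *	cmd = kzalloc(len, GFP_ATOMIC);
 *	return (u64)(unsigned long)cmd;		(prepare_multicast)
 *	cmd = (void *)(unsigned long)multicast;	(configure_filter)
 */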
1854 struct iwl_mvm_mc_iter_data {
1855 struct iwl_mvm *mvm;
1856 int port_id;
1857 };
1858
1859 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1860 struct ieee80211_vif *vif)
1861 {
1862 struct iwl_mvm_mc_iter_data *data = _data;
1863 struct iwl_mvm *mvm = data->mvm;
1864 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1865 int ret, len;
1866
1867 /* if we don't have free ports, mcast frames will be dropped */
1868 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1869 return;
1870
1871 if (vif->type != NL80211_IFTYPE_STATION ||
1872 !vif->bss_conf.assoc)
1873 return;
1874
1875 cmd->port_id = data->port_id++;
1876 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1877 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1878
1879 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1880 if (ret)
1881 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1882 }
1883
1884 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1885 {
1886 struct iwl_mvm_mc_iter_data iter_data = {
1887 .mvm = mvm,
1888 };
1889
1890 lockdep_assert_held(&mvm->mutex);
1891
1892 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1893 return;
1894
1895 ieee80211_iterate_active_interfaces_atomic(
1896 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1897 iwl_mvm_mc_iface_iterator, &iter_data);
1898 }
1899
1900 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1901 struct netdev_hw_addr_list *mc_list)
1902 {
1903 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1904 struct iwl_mcast_filter_cmd *cmd;
1905 struct netdev_hw_addr *addr;
1906 int addr_count;
1907 bool pass_all;
1908 int len;
1909
1910 addr_count = netdev_hw_addr_list_count(mc_list);
1911 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1912 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1913 if (pass_all)
1914 addr_count = 0;
1915
1916 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1917 cmd = kzalloc(len, GFP_ATOMIC);
1918 if (!cmd)
1919 return 0;
1920
1921 if (pass_all) {
1922 cmd->pass_all = 1;
1923 return (u64)(unsigned long)cmd;
1924 }
1925
1926 netdev_hw_addr_list_for_each(addr, mc_list) {
1927 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1928 cmd->count, addr->addr);
1929 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1930 addr->addr, ETH_ALEN);
1931 cmd->count++;
1932 }
1933
1934 return (u64)(unsigned long)cmd;
1935 }
1936
1937 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1938 unsigned int changed_flags,
1939 unsigned int *total_flags,
1940 u64 multicast)
1941 {
1942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1943 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1944
1945 mutex_lock(&mvm->mutex);
1946
1947 /* replace previous configuration */
1948 kfree(mvm->mcast_filter_cmd);
1949 mvm->mcast_filter_cmd = cmd;
1950
1951 if (!cmd)
1952 goto out;
1953
1954 iwl_mvm_recalc_multicast(mvm);
1955 out:
1956 mutex_unlock(&mvm->mutex);
1957 *total_flags = 0;
1958 }
1959
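/*
 * Broadcast filtering (CONFIG_IWLWIFI_BCAST_FILTERING): the command carries
 * one entry per MAC (indexed by mvmvif->id) plus a shared pool of filters.
 * iwl_mvm_bcast_filter_build_cmd() walks the active interfaces, resolves the
 * per-vif magic attributes (the ARP/IP address and the vif MAC address) and
 * attaches the resulting filters to each associated, non-P2P station vif via
 * the attached_filters bitmask. If the filter pool would overflow, the
 * iterator falls back to allowing all broadcast frames for that MAC.
 */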
1960 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1961 struct iwl_bcast_iter_data {
1962 struct iwl_mvm *mvm;
1963 struct iwl_bcast_filter_cmd *cmd;
1964 u8 current_filter;
1965 };
1966
1967 static void
1968 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1969 const struct iwl_fw_bcast_filter *in_filter,
1970 struct iwl_fw_bcast_filter *out_filter)
1971 {
1972 struct iwl_fw_bcast_filter_attr *attr;
1973 int i;
1974
1975 memcpy(out_filter, in_filter, sizeof(*out_filter));
1976
1977 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1978 attr = &out_filter->attrs[i];
1979
1980 if (!attr->mask)
1981 break;
1982
1983 switch (attr->reserved1) {
1984 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1985 if (vif->bss_conf.arp_addr_cnt != 1) {
1986 attr->mask = 0;
1987 continue;
1988 }
1989
1990 attr->val = vif->bss_conf.arp_addr_list[0];
1991 break;
1992 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1993 attr->val = *(__be32 *)&vif->addr[2];
1994 break;
1995 default:
1996 break;
1997 }
1998 attr->reserved1 = 0;
1999 out_filter->num_attrs++;
2000 }
2001 }
2002
2003 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2004 struct ieee80211_vif *vif)
2005 {
2006 struct iwl_bcast_iter_data *data = _data;
2007 struct iwl_mvm *mvm = data->mvm;
2008 struct iwl_bcast_filter_cmd *cmd = data->cmd;
2009 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2010 struct iwl_fw_bcast_mac *bcast_mac;
2011 int i;
2012
2013 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2014 return;
2015
2016 bcast_mac = &cmd->macs[mvmvif->id];
2017
2018 /*
2019 * enable filtering only for associated stations, but not for P2P
2020 * Clients
2021 */
2022 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2023 !vif->bss_conf.assoc)
2024 return;
2025
2026 bcast_mac->default_discard = 1;
2027
2028 /* copy all configured filters */
2029 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2030 /*
2031 * Make sure we don't exceed our filters limit.
2032 * If there is still a valid filter to be configured,
2033 * be on the safe side and just allow bcast for this mac.
2034 */
2035 if (WARN_ON_ONCE(data->current_filter >=
2036 ARRAY_SIZE(cmd->filters))) {
2037 bcast_mac->default_discard = 0;
2038 bcast_mac->attached_filters = 0;
2039 break;
2040 }
2041
2042 iwl_mvm_set_bcast_filter(vif,
2043 &mvm->bcast_filters[i],
2044 &cmd->filters[data->current_filter]);
2045
2046 /* skip current filter if it contains no attributes */
2047 if (!cmd->filters[data->current_filter].num_attrs)
2048 continue;
2049
2050 /* attach the filter to current mac */
2051 bcast_mac->attached_filters |=
2052 cpu_to_le16(BIT(data->current_filter));
2053
2054 data->current_filter++;
2055 }
2056 }
2057
2058 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2059 struct iwl_bcast_filter_cmd *cmd)
2060 {
2061 struct iwl_bcast_iter_data iter_data = {
2062 .mvm = mvm,
2063 .cmd = cmd,
2064 };
2065
2066 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2067 return false;
2068
2069 memset(cmd, 0, sizeof(*cmd));
2070 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2071 cmd->max_macs = ARRAY_SIZE(cmd->macs);
2072
2073 #ifdef CONFIG_IWLWIFI_DEBUGFS
2074 /* use debugfs filters/macs if override is configured */
2075 if (mvm->dbgfs_bcast_filtering.override) {
2076 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2077 sizeof(cmd->filters));
2078 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2079 sizeof(cmd->macs));
2080 return true;
2081 }
2082 #endif
2083
2084 /* if no filters are configured, do nothing */
2085 if (!mvm->bcast_filters)
2086 return false;
2087
2088 /* configure and attach these filters for each associated sta vif */
2089 ieee80211_iterate_active_interfaces(
2090 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2091 iwl_mvm_bcast_filter_iterator, &iter_data);
2092
2093 return true;
2094 }
2095 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2096 struct ieee80211_vif *vif)
2097 {
2098 struct iwl_bcast_filter_cmd cmd;
2099
2100 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2101 return 0;
2102
2103 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2104 return 0;
2105
2106 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2107 sizeof(cmd), &cmd);
2108 }
2109 #else
2110 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2111 struct ieee80211_vif *vif)
2112 {
2113 return 0;
2114 }
2115 #endif
2116
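/*
 * Station-mode bss_info_changed handling, in order: recalculate the TSF id
 * and (re)send the MAC context with the right BSSID, handle association /
 * disassociation (statistics, quotas, session protection after an HW
 * restart, SF and power state, removal of the AP station), then react to
 * BEACON_INFO (beacon filtering), PS/QOS, TXPOWER, CQM and ARP_FILTER
 * changes.
 */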
2117 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2118 struct ieee80211_vif *vif,
2119 struct ieee80211_bss_conf *bss_conf,
2120 u32 changes)
2121 {
2122 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2123 int ret;
2124
2125 /*
2126 * Re-calculate the tsf id, as the master-slave relations depend on the
2127 * beacon interval, which was not known when the station interface was
2128 * added.
2129 */
2130 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2131 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2132
2133 /*
2134 * If we're not associated yet, take the (new) BSSID before associating
2135 * so the firmware knows. If we're already associated, then use the old
2136 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2137 * branch for disassociation below.
2138 */
2139 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2140 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2141
2142 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2143 if (ret)
2144 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2145
2146 /* after sending it once, adopt mac80211 data */
2147 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2148 mvmvif->associated = bss_conf->assoc;
2149
2150 if (changes & BSS_CHANGED_ASSOC) {
2151 if (bss_conf->assoc) {
2152 /* clear statistics to get clean beacon counter */
2153 iwl_mvm_request_statistics(mvm, true);
2154 memset(&mvmvif->beacon_stats, 0,
2155 sizeof(mvmvif->beacon_stats));
2156
2157 /* add quota for this interface */
2158 ret = iwl_mvm_update_quotas(mvm, true, NULL);
2159 if (ret) {
2160 IWL_ERR(mvm, "failed to update quotas\n");
2161 return;
2162 }
2163
2164 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2165 &mvm->status)) {
2166 /*
2167 * If we're restarting then the firmware will
2168 * obviously have lost synchronisation with
2169 * the AP. It will attempt to synchronise by
2170 * itself, but we can make it more reliable by
2171 * scheduling a session protection time event.
2172 *
2173 * The firmware needs to receive a beacon to
2174 * catch up with synchronisation, use 110% of
2175 * the beacon interval.
2176 *
2177 * Set a large maximum delay to allow for more
2178 * than a single interface.
2179 */
2180 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2181 iwl_mvm_protect_session(mvm, vif, dur, dur,
2182 5 * dur, false);
2183 }
2184
2185 iwl_mvm_sf_update(mvm, vif, false);
2186 iwl_mvm_power_vif_assoc(mvm, vif);
2187 if (vif->p2p) {
2188 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2189 iwl_mvm_update_smps(mvm, vif,
2190 IWL_MVM_SMPS_REQ_PROT,
2191 IEEE80211_SMPS_DYNAMIC);
2192 }
2193 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2194 /*
2195 * If update fails - SF might be running in associated
2196 * mode while disassociated - which is forbidden.
2197 */
2198 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2199 "Failed to update SF upon disassociation\n");
2200
2201 /* remove AP station now that the MAC is unassoc */
2202 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2203 if (ret)
2204 IWL_ERR(mvm, "failed to remove AP station\n");
2205
2206 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2207 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
2208 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2209 /* remove quota for this interface */
2210 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2211 if (ret)
2212 IWL_ERR(mvm, "failed to update quotas\n");
2213
2214 if (vif->p2p)
2215 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2216
2217 /* this will take the cleared BSSID from bss_conf */
2218 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2219 if (ret)
2220 IWL_ERR(mvm,
2221 "failed to update MAC %pM (clear after unassoc)\n",
2222 vif->addr);
2223 }
2224
2225 iwl_mvm_recalc_multicast(mvm);
2226 iwl_mvm_configure_bcast_filter(mvm, vif);
2227
2228 /* reset rssi values */
2229 mvmvif->bf_data.ave_beacon_signal = 0;
2230
2231 iwl_mvm_bt_coex_vif_change(mvm);
2232 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2233 IEEE80211_SMPS_AUTOMATIC);
2234 } else if (changes & BSS_CHANGED_BEACON_INFO) {
2235 /*
2236 * We received a beacon _after_ association so
2237 * remove the session protection.
2238 */
2239 iwl_mvm_remove_time_event(mvm, mvmvif,
2240 &mvmvif->time_event_data);
2241 }
2242
2243 if (changes & BSS_CHANGED_BEACON_INFO) {
2244 iwl_mvm_sf_update(mvm, vif, false);
2245 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2246 }
2247
2248 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2249 ret = iwl_mvm_power_update_mac(mvm);
2250 if (ret)
2251 IWL_ERR(mvm, "failed to update power mode\n");
2252 }
2253
2254 if (changes & BSS_CHANGED_TXPOWER) {
2255 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2256 bss_conf->txpower);
2257 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2258 }
2259
2260 if (changes & BSS_CHANGED_CQM) {
2261 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2262 /* reset cqm events tracking */
2263 mvmvif->bf_data.last_cqm_event = 0;
2264 if (mvmvif->bf_data.bf_enabled) {
2265 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2266 if (ret)
2267 IWL_ERR(mvm,
2268 "failed to update CQM thresholds\n");
2269 }
2270 }
2271
2272 if (changes & BSS_CHANGED_ARP_FILTER) {
2273 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2274 iwl_mvm_configure_bcast_filter(mvm, vif);
2275 }
2276 }
2277
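/*
 * AP/IBSS start sequence: send the beacon template, recalculate the TSF id
 * (AP only), add the MAC context, bind it to its PHY context, add the
 * broadcast station, update power before quotas, and finally update quotas.
 * The error path unwinds these steps in reverse order.
 */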
2278 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2279 struct ieee80211_vif *vif)
2280 {
2281 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2282 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2283 int ret;
2284
2285 /*
2286 * iwl_mvm_mac_ctxt_add() might read directly from the device
2287 * (the system time), so make sure it is available.
2288 */
2289 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2290 if (ret)
2291 return ret;
2292
2293 mutex_lock(&mvm->mutex);
2294
2295 /* Send the beacon template */
2296 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2297 if (ret)
2298 goto out_unlock;
2299
2300 /*
2301 * Re-calculate the tsf id, as the master-slave relations depend on the
2302 * beacon interval, which was not known when the AP interface was added.
2303 */
2304 if (vif->type == NL80211_IFTYPE_AP)
2305 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2306
2307 /* Add the mac context */
2308 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2309 if (ret)
2310 goto out_unlock;
2311
2312 /* Perform the binding */
2313 ret = iwl_mvm_binding_add_vif(mvm, vif);
2314 if (ret)
2315 goto out_remove;
2316
2317 /* Send the bcast station. At this stage the TBTT and DTIM time events
2318 * are added and applied to the scheduler */
2319 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2320 if (ret)
2321 goto out_unbind;
2322
2323 /* must be set before quota calculations */
2324 mvmvif->ap_ibss_active = true;
2325
2326 /* power update needs to be done before quotas */
2327 iwl_mvm_power_update_mac(mvm);
2328
2329 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2330 if (ret)
2331 goto out_quota_failed;
2332
2333 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2334 if (vif->p2p && mvm->p2p_device_vif)
2335 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2336
2337 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2338
2339 iwl_mvm_bt_coex_vif_change(mvm);
2340
2341 /* we don't support TDLS during DCM */
2342 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2343 iwl_mvm_teardown_tdls_peers(mvm);
2344
2345 goto out_unlock;
2346
2347 out_quota_failed:
2348 iwl_mvm_power_update_mac(mvm);
2349 mvmvif->ap_ibss_active = false;
2350 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2351 out_unbind:
2352 iwl_mvm_binding_remove_vif(mvm, vif);
2353 out_remove:
2354 iwl_mvm_mac_ctxt_remove(mvm, vif);
2355 out_unlock:
2356 mutex_unlock(&mvm->mutex);
2357 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2358 return ret;
2359 }
2360
2361 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2362 struct ieee80211_vif *vif)
2363 {
2364 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2365 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2366
2367 iwl_mvm_prepare_mac_removal(mvm, vif);
2368
2369 mutex_lock(&mvm->mutex);
2370
2371 /* Handle AP stop while in CSA */
2372 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2373 iwl_mvm_remove_time_event(mvm, mvmvif,
2374 &mvmvif->time_event_data);
2375 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2376 }
2377
2378 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2379 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2380 mvm->csa_tx_block_bcn_timeout = 0;
2381 }
2382
2383 mvmvif->ap_ibss_active = false;
2384 mvm->ap_last_beacon_gp2 = 0;
2385
2386 iwl_mvm_bt_coex_vif_change(mvm);
2387
2388 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2389
2390 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2391 if (vif->p2p && mvm->p2p_device_vif)
2392 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2393
2394 iwl_mvm_update_quotas(mvm, false, NULL);
2395 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2396 iwl_mvm_binding_remove_vif(mvm, vif);
2397
2398 iwl_mvm_power_update_mac(mvm);
2399
2400 iwl_mvm_mac_ctxt_remove(mvm, vif);
2401
2402 mutex_unlock(&mvm->mutex);
2403 }
2404
2405 static void
2406 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2407 struct ieee80211_vif *vif,
2408 struct ieee80211_bss_conf *bss_conf,
2409 u32 changes)
2410 {
2411 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2412
2413 /* Changes will be applied when the AP/IBSS is started */
2414 if (!mvmvif->ap_ibss_active)
2415 return;
2416
2417 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2418 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2419 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2420 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2421
2422 /* Need to send a new beacon template to the FW */
2423 if (changes & BSS_CHANGED_BEACON &&
2424 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2425 IWL_WARN(mvm, "Failed updating beacon data\n");
2426
2427 if (changes & BSS_CHANGED_TXPOWER) {
2428 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2429 bss_conf->txpower);
2430 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2431 }
2432
2433 }
2434
2435 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2436 struct ieee80211_vif *vif,
2437 struct ieee80211_bss_conf *bss_conf,
2438 u32 changes)
2439 {
2440 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2441
2442 /*
2443 * iwl_mvm_bss_info_changed_station() might call
2444 * iwl_mvm_protect_session(), which reads directly from
2445 * the device (the system time), so make sure it is available.
2446 */
2447 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2448 return;
2449
2450 mutex_lock(&mvm->mutex);
2451
2452 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2453 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2454
2455 switch (vif->type) {
2456 case NL80211_IFTYPE_STATION:
2457 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2458 break;
2459 case NL80211_IFTYPE_AP:
2460 case NL80211_IFTYPE_ADHOC:
2461 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2462 break;
2463 default:
2464 /* shouldn't happen */
2465 WARN_ON_ONCE(1);
2466 }
2467
2468 mutex_unlock(&mvm->mutex);
2469 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2470 }
2471
2472 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2473 struct ieee80211_vif *vif,
2474 struct ieee80211_scan_request *hw_req)
2475 {
2476 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2477 int ret;
2478
2479 if (hw_req->req.n_channels == 0 ||
2480 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2481 return -EINVAL;
2482
2483 mutex_lock(&mvm->mutex);
2484 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2485 mutex_unlock(&mvm->mutex);
2486
2487 return ret;
2488 }
2489
2490 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2491 struct ieee80211_vif *vif)
2492 {
2493 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2494
2495 mutex_lock(&mvm->mutex);
2496
2497 /* Due to a race condition, it's possible that mac80211 asks
2498 * us to stop a hw_scan when it's already stopped. This can
2499 * happen, for instance, if we stopped the scan ourselves,
2500 * called ieee80211_scan_completed() and userspace requested to
2501 * cancel the scan before ieee80211_scan_work() could run.
2502 * To handle that, simply return if the scan is not running.
2503 */
2504 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2505 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2506
2507 mutex_unlock(&mvm->mutex);
2508 }
2509
2510 static void
2511 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2512 struct ieee80211_sta *sta, u16 tids,
2513 int num_frames,
2514 enum ieee80211_frame_release_type reason,
2515 bool more_data)
2516 {
2517 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2518
2519 /* Called when we need to transmit (a) frame(s) from mac80211 */
2520
2521 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2522 tids, more_data, false);
2523 }
2524
2525 static void
2526 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2527 struct ieee80211_sta *sta, u16 tids,
2528 int num_frames,
2529 enum ieee80211_frame_release_type reason,
2530 bool more_data)
2531 {
2532 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2533
2534 /* Called when we need to transmit (a) frame(s) from agg queue */
2535
2536 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2537 tids, more_data, true);
2538 }
2539
2540 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2541 struct ieee80211_vif *vif,
2542 enum sta_notify_cmd cmd,
2543 struct ieee80211_sta *sta)
2544 {
2545 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2546 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2547 unsigned long txqs = 0, tids = 0;
2548 int tid;
2549
2550 spin_lock_bh(&mvmsta->lock);
2551 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2552 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2553
2554 if (tid_data->state != IWL_AGG_ON &&
2555 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2556 continue;
2557
2558 __set_bit(tid_data->txq_id, &txqs);
2559
2560 if (iwl_mvm_tid_queued(tid_data) == 0)
2561 continue;
2562
2563 __set_bit(tid, &tids);
2564 }
2565
2566 switch (cmd) {
2567 case STA_NOTIFY_SLEEP:
2568 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2569 ieee80211_sta_block_awake(hw, sta, true);
2570
2571 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2572 ieee80211_sta_set_buffered(sta, tid, true);
2573
2574 if (txqs)
2575 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2576 /*
2577 * The fw updates the STA to be asleep. Tx packets on the Tx
2578 * queues to this station will not be transmitted. The fw will
2579 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2580 */
2581 break;
2582 case STA_NOTIFY_AWAKE:
2583 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2584 break;
2585
2586 if (txqs)
2587 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2588 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2589 break;
2590 default:
2591 break;
2592 }
2593 spin_unlock_bh(&mvmsta->lock);
2594 }
2595
2596 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2597 struct ieee80211_vif *vif,
2598 struct ieee80211_sta *sta)
2599 {
2600 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2601 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2602
2603 /*
2604 * This is called before mac80211 does RCU synchronisation,
2605 * so here we already invalidate our internal RCU-protected
2606 * station pointer. The rest of the code will thus no longer
2607 * be able to find the station this way, and we don't rely
2608 * on further RCU synchronisation after the sta_state()
2609 * callback deleted the station.
2610 */
2611 mutex_lock(&mvm->mutex);
2612 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2613 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2614 ERR_PTR(-ENOENT));
2615 mutex_unlock(&mvm->mutex);
2616 }
2617
2618 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2619 const u8 *bssid)
2620 {
2621 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2622 return;
2623
2624 if (iwlwifi_mod_params.uapsd_disable) {
2625 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2626 return;
2627 }
2628
2629 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2630 }
2631
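/*
 * Station state machine: NOTEXIST->NONE adds the station (after beacon
 * interval and TDLS sanity checks), NONE->AUTH resets the EBS status and
 * checks uAPSD, AUTH->ASSOC updates the station and initializes rate
 * scaling, ASSOC->AUTHORIZED enables beacon filtering (and tears down TDLS
 * peers during DCM), AUTHORIZED->ASSOC disables beacon filtering again, and
 * NONE->NOTEXIST removes the station. TDLS TID reservation is handled
 * outside the mutex, once the transition has succeeded.
 */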
2632 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2633 struct ieee80211_vif *vif,
2634 struct ieee80211_sta *sta,
2635 enum ieee80211_sta_state old_state,
2636 enum ieee80211_sta_state new_state)
2637 {
2638 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2639 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2640 int ret;
2641
2642 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2643 sta->addr, old_state, new_state);
2644
2645 /* this would be a mac80211 bug ... but don't crash */
2646 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2647 return -EINVAL;
2648
2649 /* if a STA is being removed, reuse its ID */
2650 flush_work(&mvm->sta_drained_wk);
2651
2652 mutex_lock(&mvm->mutex);
2653 if (old_state == IEEE80211_STA_NOTEXIST &&
2654 new_state == IEEE80211_STA_NONE) {
2655 /*
2656 * Firmware bug - it'll crash if the beacon interval is less
2657 * than 16. We can't avoid connecting at all, so refuse the
2658 * station state change, this will cause mac80211 to abandon
2659 * attempts to connect to this AP, and eventually wpa_s will
2660 * blacklist the AP...
2661 */
2662 if (vif->type == NL80211_IFTYPE_STATION &&
2663 vif->bss_conf.beacon_int < 16) {
2664 IWL_ERR(mvm,
2665 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2666 sta->addr, vif->bss_conf.beacon_int);
2667 ret = -EINVAL;
2668 goto out_unlock;
2669 }
2670
2671 if (sta->tdls &&
2672 (vif->p2p ||
2673 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2674 IWL_MVM_TDLS_STA_COUNT ||
2675 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2676 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2677 ret = -EBUSY;
2678 goto out_unlock;
2679 }
2680
2681 ret = iwl_mvm_add_sta(mvm, vif, sta);
2682 if (sta->tdls && ret == 0)
2683 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2684 } else if (old_state == IEEE80211_STA_NONE &&
2685 new_state == IEEE80211_STA_AUTH) {
2686 /*
2687 * EBS may be disabled due to previous failures reported by FW.
2688 * Reset EBS status here assuming environment has been changed.
2689 */
2690 mvm->last_ebs_successful = true;
2691 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2692 ret = 0;
2693 } else if (old_state == IEEE80211_STA_AUTH &&
2694 new_state == IEEE80211_STA_ASSOC) {
2695 ret = iwl_mvm_update_sta(mvm, vif, sta);
2696 if (ret == 0)
2697 iwl_mvm_rs_rate_init(mvm, sta,
2698 mvmvif->phy_ctxt->channel->band,
2699 true);
2700 } else if (old_state == IEEE80211_STA_ASSOC &&
2701 new_state == IEEE80211_STA_AUTHORIZED) {
2702
2703 /* we don't support TDLS during DCM */
2704 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2705 iwl_mvm_teardown_tdls_peers(mvm);
2706
2707 /* enable beacon filtering */
2708 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2709 ret = 0;
2710 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2711 new_state == IEEE80211_STA_ASSOC) {
2712 /* disable beacon filtering */
2713 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2714 ret = 0;
2715 } else if (old_state == IEEE80211_STA_ASSOC &&
2716 new_state == IEEE80211_STA_AUTH) {
2717 ret = 0;
2718 } else if (old_state == IEEE80211_STA_AUTH &&
2719 new_state == IEEE80211_STA_NONE) {
2720 ret = 0;
2721 } else if (old_state == IEEE80211_STA_NONE &&
2722 new_state == IEEE80211_STA_NOTEXIST) {
2723 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2724 if (sta->tdls)
2725 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2726 } else {
2727 ret = -EIO;
2728 }
2729 out_unlock:
2730 mutex_unlock(&mvm->mutex);
2731
2732 if (sta->tdls && ret == 0) {
2733 if (old_state == IEEE80211_STA_NOTEXIST &&
2734 new_state == IEEE80211_STA_NONE)
2735 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2736 else if (old_state == IEEE80211_STA_NONE &&
2737 new_state == IEEE80211_STA_NOTEXIST)
2738 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2739 }
2740
2741 return ret;
2742 }
2743
2744 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2745 {
2746 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2747
2748 mvm->rts_threshold = value;
2749
2750 return 0;
2751 }
2752
2753 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2754 struct ieee80211_vif *vif,
2755 struct ieee80211_sta *sta, u32 changed)
2756 {
2757 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2758
2759 if (vif->type == NL80211_IFTYPE_STATION &&
2760 changed & IEEE80211_RC_NSS_CHANGED)
2761 iwl_mvm_sf_update(mvm, vif, false);
2762 }
2763
2764 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2765 struct ieee80211_vif *vif, u16 ac,
2766 const struct ieee80211_tx_queue_params *params)
2767 {
2768 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2769 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2770
2771 mvmvif->queue_params[ac] = *params;
2772
2773 /*
2774 * No need to update right away; we'll get BSS_CHANGED_QOS.
2775 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2776 */
2777 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2778 int ret;
2779
2780 mutex_lock(&mvm->mutex);
2781 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2782 mutex_unlock(&mvm->mutex);
2783 return ret;
2784 }
2785 return 0;
2786 }
2787
2788 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2789 struct ieee80211_vif *vif)
2790 {
2791 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2792 u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2793 200 + vif->bss_conf.beacon_int);
2794 u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2795 100 + vif->bss_conf.beacon_int);
2796
2797 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2798 return;
2799
2800 /*
2801 * iwl_mvm_protect_session() reads directly from the device
2802 * (the system time), so make sure it is available.
2803 */
2804 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2805 return;
2806
2807 mutex_lock(&mvm->mutex);
2808 /* Try really hard to protect the session and hear a beacon */
2809 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2810 mutex_unlock(&mvm->mutex);
2811
2812 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2813 }
2814
2815 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2816 struct ieee80211_vif *vif,
2817 struct cfg80211_sched_scan_request *req,
2818 struct ieee80211_scan_ies *ies)
2819 {
2820 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2821
2822 int ret;
2823
2824 mutex_lock(&mvm->mutex);
2825
2826 if (!vif->bss_conf.idle) {
2827 ret = -EBUSY;
2828 goto out;
2829 }
2830
2831 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2832
2833 out:
2834 mutex_unlock(&mvm->mutex);
2835 return ret;
2836 }
2837
2838 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2839 struct ieee80211_vif *vif)
2840 {
2841 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2842 int ret;
2843
2844 mutex_lock(&mvm->mutex);
2845
2846 /* Due to a race condition, it's possible that mac80211 asks
2847 * us to stop a sched_scan when it's already stopped. This
2848 * can happen, for instance, if we stopped the scan ourselves,
2849 * called ieee80211_sched_scan_stopped() and userspace requested to
2850 * stop the sched scan before ieee80211_sched_scan_stopped_work()
2851 * could run. To handle this, simply return if the scan is
2852 * not running.
2853 */
2854 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2855 mutex_unlock(&mvm->mutex);
2856 return 0;
2857 }
2858
2859 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2860 mutex_unlock(&mvm->mutex);
2861 iwl_mvm_wait_for_async_handlers(mvm);
2862
2863 return ret;
2864 }
2865
2866 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2867 enum set_key_cmd cmd,
2868 struct ieee80211_vif *vif,
2869 struct ieee80211_sta *sta,
2870 struct ieee80211_key_conf *key)
2871 {
2872 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2873 int ret;
2874
2875 if (iwlwifi_mod_params.sw_crypto) {
2876 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2877 return -EOPNOTSUPP;
2878 }
2879
2880 switch (key->cipher) {
2881 case WLAN_CIPHER_SUITE_TKIP:
2882 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2883 /* fall-through */
2884 case WLAN_CIPHER_SUITE_CCMP:
2885 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2886 break;
2887 case WLAN_CIPHER_SUITE_AES_CMAC:
2888 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2889 break;
2890 case WLAN_CIPHER_SUITE_WEP40:
2891 case WLAN_CIPHER_SUITE_WEP104:
2892 /* For non-client mode, only use WEP keys for TX as we probably
2893 * don't have a station yet anyway and would then have to keep
2894 * track of the keys, linking them to each of the clients/peers
2895 * as they appear. For now, don't do that; WEP offload doesn't
2896 * really matter much for performance, but we need it for some
2897 * other offload features in client mode.
2898 */
2899 if (vif->type != NL80211_IFTYPE_STATION)
2900 return 0;
2901 break;
2902 default:
2903 /* currently FW supports only one optional cipher scheme */
2904 if (hw->n_cipher_schemes &&
2905 hw->cipher_schemes->cipher == key->cipher)
2906 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2907 else
2908 return -EOPNOTSUPP;
2909 }
2910
2911 mutex_lock(&mvm->mutex);
2912
2913 switch (cmd) {
2914 case SET_KEY:
2915 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2916 vif->type == NL80211_IFTYPE_AP) && !sta) {
2917 /*
2918 * GTK on AP interface is a TX-only key, return 0;
2919 * on IBSS they're per-station and because we're lazy
2920 * we don't support them for RX, so do the same.
2921 */
2922 ret = 0;
2923 key->hw_key_idx = STA_KEY_IDX_INVALID;
2924 break;
2925 }
2926
2927 /* During FW restart, in order to restore the state as it was,
2928 * don't try to reprogram keys we previously failed for.
2929 */
2930 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2931 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2932 IWL_DEBUG_MAC80211(mvm,
2933 "skip invalid idx key programming during restart\n");
2934 ret = 0;
2935 break;
2936 }
2937
2938 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2939 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
2940 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2941 &mvm->status));
2942 if (ret) {
2943 IWL_WARN(mvm, "set key failed\n");
2944 /*
2945 * can't add key for RX, but we don't need it
2946 * in the device for TX so still return 0
2947 */
2948 key->hw_key_idx = STA_KEY_IDX_INVALID;
2949 ret = 0;
2950 }
2951
2952 break;
2953 case DISABLE_KEY:
2954 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2955 ret = 0;
2956 break;
2957 }
2958
2959 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2960 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2961 break;
2962 default:
2963 ret = -EINVAL;
2964 }
2965
2966 mutex_unlock(&mvm->mutex);
2967 return ret;
2968 }
2969
2970 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2971 struct ieee80211_vif *vif,
2972 struct ieee80211_key_conf *keyconf,
2973 struct ieee80211_sta *sta,
2974 u32 iv32, u16 *phase1key)
2975 {
2976 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2977
2978 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2979 return;
2980
2981 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2982 }
2983
2984
2985 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2986 struct iwl_rx_packet *pkt, void *data)
2987 {
2988 struct iwl_mvm *mvm =
2989 container_of(notif_wait, struct iwl_mvm, notif_wait);
2990 struct iwl_hs20_roc_res *resp;
2991 int resp_len = iwl_rx_packet_payload_len(pkt);
2992 struct iwl_mvm_time_event_data *te_data = data;
2993
2994 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2995 return true;
2996
2997 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2998 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2999 return true;
3000 }
3001
3002 resp = (void *)pkt->data;
3003
3004 IWL_DEBUG_TE(mvm,
3005 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
3006 resp->status, resp->event_unique_id);
3007
3008 te_data->uid = le32_to_cpu(resp->event_unique_id);
3009 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3010 te_data->uid);
3011
3012 spin_lock_bh(&mvm->time_event_lock);
3013 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3014 spin_unlock_bh(&mvm->time_event_lock);
3015
3016 return true;
3017 }
3018
3019 #define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
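/*
 * Aux ROC (hotspot) support: the HOT_SPOT_CMD is sent on the aux MAC and aux
 * station with the requested channel and duration. A notification wait is
 * used to pick up the firmware's unique event id directly in the RX path
 * (see iwl_mvm_rx_aux_roc() above) and to queue the time event on
 * aux_roc_te_list.
 */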
3020 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3021 struct ieee80211_channel *channel,
3022 struct ieee80211_vif *vif,
3023 int duration)
3024 {
3025 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3026 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3027 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3028 static const u8 time_event_response[] = { HOT_SPOT_CMD };
3029 struct iwl_notification_wait wait_time_event;
3030 struct iwl_hs20_roc_req aux_roc_req = {
3031 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3032 .id_and_color =
3033 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3034 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3035 /* Set the channel info data */
3036 .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3037 PHY_BAND_24 : PHY_BAND_5,
3038 .channel_info.channel = channel->hw_value,
3039 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3040 /* Set the time and duration */
3041 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3042 .apply_time_max_delay =
3043 cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3044 .duration = cpu_to_le32(MSEC_TO_TU(duration)),
3045 };
3046
3047 /* Set the node address */
3048 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3049
3050 lockdep_assert_held(&mvm->mutex);
3051
3052 spin_lock_bh(&mvm->time_event_lock);
3053
3054 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3055 spin_unlock_bh(&mvm->time_event_lock);
3056 return -EIO;
3057 }
3058
3059 te_data->vif = vif;
3060 te_data->duration = duration;
3061 te_data->id = HOT_SPOT_CMD;
3062
3063 spin_unlock_bh(&mvm->time_event_lock);
3064
3065 /*
3066 * Use a notification wait, which really just processes the
3067 * command response and doesn't wait for anything, in order
3068 * to be able to process the response and get the UID inside
3069 * the RX path. Using CMD_WANT_SKB doesn't work because it
3070 * stores the buffer and then wakes up this thread, by which
3071 * time another notification (that the time event started)
3072 * might already have been processed unsuccessfully.
3073 */
3074 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3075 time_event_response,
3076 ARRAY_SIZE(time_event_response),
3077 iwl_mvm_rx_aux_roc, te_data);
3078
3079 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3080 &aux_roc_req);
3081
3082 if (res) {
3083 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3084 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3085 goto out_clear_te;
3086 }
3087
3088 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3089 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3090 /* should never fail */
3091 WARN_ON_ONCE(res);
3092
3093 if (res) {
3094 out_clear_te:
3095 spin_lock_bh(&mvm->time_event_lock);
3096 iwl_mvm_te_clear_data(mvm, te_data);
3097 spin_unlock_bh(&mvm->time_event_lock);
3098 }
3099
3100 return res;
3101 }
3102
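/*
 * remain-on-channel: station vifs use the aux ROC path when the firmware
 * advertises hotspot support; P2P_DEVICE either reuses an existing PHY
 * context that already runs on the requested channel (rebinding to it),
 * reconfigures its own PHY context if it is the only user, or allocates a
 * fresh one, and then schedules the ROC time event.
 */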
3103 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3104 struct ieee80211_vif *vif,
3105 struct ieee80211_channel *channel,
3106 int duration,
3107 enum ieee80211_roc_type type)
3108 {
3109 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3110 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3111 struct cfg80211_chan_def chandef;
3112 struct iwl_mvm_phy_ctxt *phy_ctxt;
3113 int ret, i;
3114
3115 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3116 duration, type);
3117
3118 flush_work(&mvm->roc_done_wk);
3119
3120 mutex_lock(&mvm->mutex);
3121
3122 switch (vif->type) {
3123 case NL80211_IFTYPE_STATION:
3124 if (fw_has_capa(&mvm->fw->ucode_capa,
3125 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3126 /* Use aux roc framework (HS20) */
3127 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3128 vif, duration);
3129 goto out_unlock;
3130 }
3131 IWL_ERR(mvm, "hotspot not supported\n");
3132 ret = -EINVAL;
3133 goto out_unlock;
3134 case NL80211_IFTYPE_P2P_DEVICE:
3135 /* handle below */
3136 break;
3137 default:
3138 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3139 ret = -EINVAL;
3140 goto out_unlock;
3141 }
3142
3143 for (i = 0; i < NUM_PHY_CTX; i++) {
3144 phy_ctxt = &mvm->phy_ctxts[i];
3145 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3146 continue;
3147
3148 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3149 /*
3150 * Unbind the P2P_DEVICE from the current PHY context,
3151 * and if the PHY context is no longer used, remove it.
3152 */
3153 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3154 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3155 goto out_unlock;
3156
3157 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3158
3159 /* Bind the P2P_DEVICE to the current PHY Context */
3160 mvmvif->phy_ctxt = phy_ctxt;
3161
3162 ret = iwl_mvm_binding_add_vif(mvm, vif);
3163 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3164 goto out_unlock;
3165
3166 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3167 goto schedule_time_event;
3168 }
3169 }
3170
3171 /* Need to update the PHY context only if the ROC channel changed */
3172 if (channel == mvmvif->phy_ctxt->channel)
3173 goto schedule_time_event;
3174
3175 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3176
3177 /*
3178 * Change the PHY context configuration as it is currently referenced
3179 * only by the P2P Device MAC
3180 */
3181 if (mvmvif->phy_ctxt->ref == 1) {
3182 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3183 &chandef, 1, 1);
3184 if (ret)
3185 goto out_unlock;
3186 } else {
3187 /*
3188 * The PHY context is shared with other MACs. Need to remove the
3189 * P2P Device from the binding, allocate a new PHY context and
3190 * create a new binding
3191 */
3192 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3193 if (!phy_ctxt) {
3194 ret = -ENOSPC;
3195 goto out_unlock;
3196 }
3197
3198 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3199 1, 1);
3200 if (ret) {
3201 IWL_ERR(mvm, "Failed to change PHY context\n");
3202 goto out_unlock;
3203 }
3204
3205 /* Unbind the P2P_DEVICE from the current PHY context */
3206 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3207 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3208 goto out_unlock;
3209
3210 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3211
3212 /* Bind the P2P_DEVICE to the new allocated PHY context */
3213 mvmvif->phy_ctxt = phy_ctxt;
3214
3215 ret = iwl_mvm_binding_add_vif(mvm, vif);
3216 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3217 goto out_unlock;
3218
3219 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3220 }
3221
3222 schedule_time_event:
3223 /* Schedule the time events */
3224 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3225
3226 out_unlock:
3227 mutex_unlock(&mvm->mutex);
3228 IWL_DEBUG_MAC80211(mvm, "leave\n");
3229 return ret;
3230 }
3231
3232 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3233 {
3234 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3235
3236 IWL_DEBUG_MAC80211(mvm, "enter\n");
3237
3238 mutex_lock(&mvm->mutex);
3239 iwl_mvm_stop_roc(mvm);
3240 mutex_unlock(&mvm->mutex);
3241
3242 IWL_DEBUG_MAC80211(mvm, "leave\n");
3243 return 0;
3244 }
3245
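/*
 * Channel context handling: the id of the underlying iwl_mvm_phy_ctxt is
 * stored in ctx->drv_priv as a u16, and the PHY context itself is
 * reference-counted through iwl_mvm_phy_ctxt_ref()/_unref().
 */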
3246 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3247 struct ieee80211_chanctx_conf *ctx)
3248 {
3249 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3250 struct iwl_mvm_phy_ctxt *phy_ctxt;
3251 int ret;
3252
3253 lockdep_assert_held(&mvm->mutex);
3254
3255 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3256
3257 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3258 if (!phy_ctxt) {
3259 ret = -ENOSPC;
3260 goto out;
3261 }
3262
3263 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3264 ctx->rx_chains_static,
3265 ctx->rx_chains_dynamic);
3266 if (ret) {
3267 IWL_ERR(mvm, "Failed to add PHY context\n");
3268 goto out;
3269 }
3270
3271 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3272 *phy_ctxt_id = phy_ctxt->id;
3273 out:
3274 return ret;
3275 }
3276
3277 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3278 struct ieee80211_chanctx_conf *ctx)
3279 {
3280 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3281 int ret;
3282
3283 mutex_lock(&mvm->mutex);
3284 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3285 mutex_unlock(&mvm->mutex);
3286
3287 return ret;
3288 }
3289
3290 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3291 struct ieee80211_chanctx_conf *ctx)
3292 {
3293 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3294 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3295
3296 lockdep_assert_held(&mvm->mutex);
3297
3298 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3299 }
3300
3301 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3302 struct ieee80211_chanctx_conf *ctx)
3303 {
3304 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3305
3306 mutex_lock(&mvm->mutex);
3307 __iwl_mvm_remove_chanctx(mvm, ctx);
3308 mutex_unlock(&mvm->mutex);
3309 }
3310
3311 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3312 struct ieee80211_chanctx_conf *ctx,
3313 u32 changed)
3314 {
3315 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3316 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3317 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3318
3319 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3320 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3321 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3322 IEEE80211_CHANCTX_CHANGE_RADAR |
3323 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3324 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3325 phy_ctxt->ref, changed))
3326 return;
3327
3328 mutex_lock(&mvm->mutex);
3329 iwl_mvm_bt_coex_vif_change(mvm);
3330 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3331 ctx->rx_chains_static,
3332 ctx->rx_chains_dynamic);
3333 mutex_unlock(&mvm->mutex);
3334 }
3335
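/*
 * Assigning a vif to a channel context: AP/IBSS binding is normally done in
 * the start_ap/start_ibss flow, so it is only handled here when switching
 * contexts (CSA); monitor interfaces disable powersave and need an explicit
 * quota update; a station that is switching channels gets a session
 * protection time event of twice the beacon interval so that the first
 * beacon on the new channel is heard.
 */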
3336 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3337 struct ieee80211_vif *vif,
3338 struct ieee80211_chanctx_conf *ctx,
3339 bool switching_chanctx)
3340 {
3341 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3342 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3343 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3344 int ret;
3345
3346 lockdep_assert_held(&mvm->mutex);
3347
3348 mvmvif->phy_ctxt = phy_ctxt;
3349
3350 switch (vif->type) {
3351 case NL80211_IFTYPE_AP:
3352 /* only needed if we're switching chanctx (i.e. during CSA) */
3353 if (switching_chanctx) {
3354 mvmvif->ap_ibss_active = true;
3355 break;
3356 }
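		/* fall-through */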
3357 case NL80211_IFTYPE_ADHOC:
3358 /*
3359 * The AP binding flow is handled as part of the start_ap flow
3360 * (in bss_info_changed), similarly for IBSS.
3361 */
3362 ret = 0;
3363 goto out;
3364 case NL80211_IFTYPE_STATION:
3365 break;
3366 case NL80211_IFTYPE_MONITOR:
3367 /* always disable PS when a monitor interface is active */
3368 mvmvif->ps_disabled = true;
3369 break;
3370 default:
3371 ret = -EINVAL;
3372 goto out;
3373 }
3374
3375 ret = iwl_mvm_binding_add_vif(mvm, vif);
3376 if (ret)
3377 goto out;
3378
3379 /*
3380 * Power state must be updated before quotas,
3381 * otherwise fw will complain.
3382 */
3383 iwl_mvm_power_update_mac(mvm);
3384
3385 /* Setting the quota at this stage is only required for monitor
3386 * interfaces. For the other types, the bss_info changed flow
3387 * will handle quota settings.
3388 */
3389 if (vif->type == NL80211_IFTYPE_MONITOR) {
3390 mvmvif->monitor_active = true;
3391 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3392 if (ret)
3393 goto out_remove_binding;
3394 }
3395
3396 /* Handle binding during CSA */
3397 if (vif->type == NL80211_IFTYPE_AP) {
3398 iwl_mvm_update_quotas(mvm, false, NULL);
3399 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3400 }
3401
3402 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3403 u32 duration = 2 * vif->bss_conf.beacon_int;
3404
3405 /* iwl_mvm_protect_session() reads directly from the
3406 * device (the system time), so make sure it is
3407 * available.
3408 */
3409 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3410 if (ret)
3411 goto out_remove_binding;
3412
3413 /* Protect the session to make sure we hear the first
3414 * beacon on the new channel.
3415 */
3416 iwl_mvm_protect_session(mvm, vif, duration, duration,
3417 vif->bss_conf.beacon_int / 2,
3418 true);
3419
3420 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3421
3422 iwl_mvm_update_quotas(mvm, false, NULL);
3423 }
3424
3425 goto out;
3426
3427 out_remove_binding:
3428 iwl_mvm_binding_remove_vif(mvm, vif);
3429 iwl_mvm_power_update_mac(mvm);
3430 out:
3431 if (ret)
3432 mvmvif->phy_ctxt = NULL;
3433 return ret;
3434 }
3435 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3436 struct ieee80211_vif *vif,
3437 struct ieee80211_chanctx_conf *ctx)
3438 {
3439 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3440 int ret;
3441
3442 mutex_lock(&mvm->mutex);
3443 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3444 mutex_unlock(&mvm->mutex);
3445
3446 return ret;
3447 }
3448
3449 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3450 struct ieee80211_vif *vif,
3451 struct ieee80211_chanctx_conf *ctx,
3452 bool switching_chanctx)
3453 {
3454 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3455 struct ieee80211_vif *disabled_vif = NULL;
3456
3457 lockdep_assert_held(&mvm->mutex);
3458
3459 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3460
3461 switch (vif->type) {
3462 case NL80211_IFTYPE_ADHOC:
3463 goto out;
3464 case NL80211_IFTYPE_MONITOR:
3465 mvmvif->monitor_active = false;
3466 mvmvif->ps_disabled = false;
3467 break;
3468 case NL80211_IFTYPE_AP:
3469 /* This part is triggered only during CSA */
3470 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3471 goto out;
3472
3473 mvmvif->csa_countdown = false;
3474
3475 /* Set CS bit on all the stations */
3476 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3477
3478 /* Save blocked iface, the timeout is set on the next beacon */
3479 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3480
3481 mvmvif->ap_ibss_active = false;
3482 break;
3483 case NL80211_IFTYPE_STATION:
3484 if (!switching_chanctx)
3485 break;
3486
3487 disabled_vif = vif;
3488
3489 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3490 break;
3491 default:
3492 break;
3493 }
3494
3495 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3496 iwl_mvm_binding_remove_vif(mvm, vif);
3497
3498 out:
3499 mvmvif->phy_ctxt = NULL;
3500 iwl_mvm_power_update_mac(mvm);
3501 }
3502
3503 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3504 struct ieee80211_vif *vif,
3505 struct ieee80211_chanctx_conf *ctx)
3506 {
3507 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3508
3509 mutex_lock(&mvm->mutex);
3510 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3511 mutex_unlock(&mvm->mutex);
3512 }
3513
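/*
 * Channel context switch, single vif only: SWAP_CONTEXTS removes the old
 * context and adds/assigns the new one, while REASSIGN_VIF keeps both
 * contexts and only moves the vif. On failure both paths try to fall back to
 * the old context, and if even that fails the NIC is restarted.
 */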
3514 static int
3515 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3516 struct ieee80211_vif_chanctx_switch *vifs)
3517 {
3518 int ret;
3519
3520 mutex_lock(&mvm->mutex);
3521 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3522 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3523
3524 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3525 if (ret) {
3526 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3527 goto out_reassign;
3528 }
3529
3530 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3531 true);
3532 if (ret) {
3533 IWL_ERR(mvm,
3534 "failed to assign new_ctx during channel switch\n");
3535 goto out_remove;
3536 }
3537
3538 /* we don't support TDLS during DCM - can be caused by channel switch */
3539 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3540 iwl_mvm_teardown_tdls_peers(mvm);
3541
3542 goto out;
3543
3544 out_remove:
3545 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3546
3547 out_reassign:
3548 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3549 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3550 goto out_restart;
3551 }
3552
3553 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3554 true)) {
3555 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3556 goto out_restart;
3557 }
3558
3559 goto out;
3560
3561 out_restart:
3562 /* things keep failing, better restart the hw */
3563 iwl_mvm_nic_restart(mvm, false);
3564
3565 out:
3566 mutex_unlock(&mvm->mutex);
3567
3568 return ret;
3569 }
3570
3571 static int
3572 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3573 struct ieee80211_vif_chanctx_switch *vifs)
3574 {
3575 int ret;
3576
3577 mutex_lock(&mvm->mutex);
3578 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3579
3580 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3581 true);
3582 if (ret) {
3583 IWL_ERR(mvm,
3584 "failed to assign new_ctx during channel switch\n");
3585 goto out_reassign;
3586 }
3587
3588 goto out;
3589
3590 out_reassign:
3591 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3592 true)) {
3593 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3594 goto out_restart;
3595 }
3596
3597 goto out;
3598
3599 out_restart:
3600 /* things keep failing, better restart the hw */
3601 iwl_mvm_nic_restart(mvm, false);
3602
3603 out:
3604 mutex_unlock(&mvm->mutex);
3605
3606 return ret;
3607 }
3608
3609 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3610 struct ieee80211_vif_chanctx_switch *vifs,
3611 int n_vifs,
3612 enum ieee80211_chanctx_switch_mode mode)
3613 {
3614 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3615 int ret;
3616
3617 /* we only support a single vif right now */
3618 if (n_vifs > 1)
3619 return -EOPNOTSUPP;
3620
3621 switch (mode) {
3622 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3623 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3624 break;
3625 case CHANCTX_SWMODE_REASSIGN_VIF:
3626 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3627 break;
3628 default:
3629 ret = -EOPNOTSUPP;
3630 break;
3631 }
3632
3633 return ret;
3634 }
3635
3636 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3637 struct ieee80211_sta *sta,
3638 bool set)
3639 {
3640 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3641 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3642
3643 if (!mvm_sta || !mvm_sta->vif) {
3644 IWL_ERR(mvm, "Station is not associated to a vif\n");
3645 return -EINVAL;
3646 }
3647
3648 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3649 }
3650
3651 #ifdef CONFIG_NL80211_TESTMODE
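/*
 * nl80211 testmode hooks: IWL_MVM_TM_CMD_SET_NOA programs a NoA duration for
 * a beaconing P2P GO vif (applied through the quota command), and
 * IWL_MVM_TM_CMD_SET_BEACON_FILTER toggles beacon filtering on an associated
 * client vif.
 */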
3652 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3653 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3654 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3655 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3656 };
3657
3658 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3659 struct ieee80211_vif *vif,
3660 void *data, int len)
3661 {
3662 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3663 int err;
3664 u32 noa_duration;
3665
3666 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3667 if (err)
3668 return err;
3669
3670 if (!tb[IWL_MVM_TM_ATTR_CMD])
3671 return -EINVAL;
3672
3673 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3674 case IWL_MVM_TM_CMD_SET_NOA:
3675 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3676 !vif->bss_conf.enable_beacon ||
3677 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3678 return -EINVAL;
3679
3680 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3681 if (noa_duration >= vif->bss_conf.beacon_int)
3682 return -EINVAL;
3683
3684 mvm->noa_duration = noa_duration;
3685 mvm->noa_vif = vif;
3686
3687 return iwl_mvm_update_quotas(mvm, false, NULL);
3688 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3689 /* must be associated client vif - ignore authorized */
3690 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3691 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3692 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3693 return -EINVAL;
3694
3695 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3696 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3697 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3698 }
3699
3700 return -EOPNOTSUPP;
3701 }
3702
3703 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3704 struct ieee80211_vif *vif,
3705 void *data, int len)
3706 {
3707 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3708 int err;
3709
3710 mutex_lock(&mvm->mutex);
3711 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3712 mutex_unlock(&mvm->mutex);
3713
3714 return err;
3715 }
3716 #endif
3717
3718 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3719 struct ieee80211_vif *vif,
3720 struct ieee80211_channel_switch *chsw)
3721 {
3722 /* By implementing this operation, we prevent mac80211 from
3723 * starting its own channel switch timer, so that we can call
3724 * ieee80211_chswitch_done() ourselves at the right time
3725 * (which is when the absence time event starts).
3726 */
3727
3728 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3729 "dummy channel switch op\n");
3730 }
3731
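/* mac80211 pre_channel_switch handler. For an AP/GO vif, record it as
 * the vif doing CSA (only one CSA at a time is supported). For a
 * station vif, schedule the channel-switch time event around the last
 * countdown beacons, stop TX to the AP/GO if the switch blocks TX and
 * temporarily disable beacon filtering. In all cases power save is
 * disabled and TDLS peers are torn down, since this channel is being
 * left.
 */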
3732 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3733 struct ieee80211_vif *vif,
3734 struct ieee80211_channel_switch *chsw)
3735 {
3736 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3737 struct ieee80211_vif *csa_vif;
3738 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3739 u32 apply_time;
3740 int ret;
3741
3742 mutex_lock(&mvm->mutex);
3743
3744 mvmvif->csa_failed = false;
3745
3746 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3747 chsw->chandef.center_freq1);
3748
3749 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3750
3751 switch (vif->type) {
3752 case NL80211_IFTYPE_AP:
3753 csa_vif =
3754 rcu_dereference_protected(mvm->csa_vif,
3755 lockdep_is_held(&mvm->mutex));
3756 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3757 "Another CSA is already in progress")) {
3758 ret = -EBUSY;
3759 goto out_unlock;
3760 }
3761
3762 rcu_assign_pointer(mvm->csa_vif, vif);
3763
3764 if (WARN_ONCE(mvmvif->csa_countdown,
3765 "Previous CSA countdown didn't complete")) {
3766 ret = -EBUSY;
3767 goto out_unlock;
3768 }
3769
3770 break;
3771 case NL80211_IFTYPE_STATION:
3772 /* Schedule the time event to a bit before beacon 1,
3773 * to make sure we're in the new channel when the
3774 * GO/AP arrives.
3775 */
3776 apply_time = chsw->device_timestamp +
3777 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3778 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3779
3780 if (chsw->block_tx)
3781 iwl_mvm_csa_client_absent(mvm, vif);
3782
3783 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3784 apply_time);
3785 if (mvmvif->bf_data.bf_enabled) {
3786 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3787 if (ret)
3788 goto out_unlock;
3789 }
3790
3791 break;
3792 default:
3793 break;
3794 }
3795
3796 mvmvif->ps_disabled = true;
3797
3798 ret = iwl_mvm_power_update_ps(mvm);
3799 if (ret)
3800 goto out_unlock;
3801
3802 /* we won't be on this channel any longer */
3803 iwl_mvm_teardown_tdls_peers(mvm);
3804
3805 out_unlock:
3806 mutex_unlock(&mvm->mutex);
3807
3808 return ret;
3809 }
3810
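/* mac80211 post_channel_switch handler: if the CSA failed, return
 * -EIO. Otherwise undo the pre_channel_switch state for a station vif
 * (re-enable TX to the AP, update the MAC context, re-enable beacon
 * filtering, stop the session protection time event) and then
 * re-enable power save.
 */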
3811 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3812 struct ieee80211_vif *vif)
3813 {
3814 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3815 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3816 int ret;
3817
3818 mutex_lock(&mvm->mutex);
3819
3820 if (mvmvif->csa_failed) {
3821 mvmvif->csa_failed = false;
3822 ret = -EIO;
3823 goto out_unlock;
3824 }
3825
3826 if (vif->type == NL80211_IFTYPE_STATION) {
3827 struct iwl_mvm_sta *mvmsta;
3828
3829 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3830 mvmvif->ap_sta_id);
3831
3832 if (WARN_ON(!mvmsta)) {
3833 ret = -EIO;
3834 goto out_unlock;
3835 }
3836
3837 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3838
3839 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3840
3841 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3842 if (ret)
3843 goto out_unlock;
3844
3845 iwl_mvm_stop_session_protection(mvm, vif);
3846 }
3847
3848 mvmvif->ps_disabled = false;
3849
3850 ret = iwl_mvm_power_update_ps(mvm);
3851
3852 out_unlock:
3853 mutex_unlock(&mvm->mutex);
3854
3855 return ret;
3856 }
3857
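/* mac80211 flush handler for station vifs: collect the TFD queues of
 * the AP station and all TDLS peers belonging to the vif, then either
 * drop the pending frames or wait, without holding the mutex, for the
 * queues to drain.
 */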
3858 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3859 struct ieee80211_vif *vif, u32 queues, bool drop)
3860 {
3861 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3862 struct iwl_mvm_vif *mvmvif;
3863 struct iwl_mvm_sta *mvmsta;
3864 struct ieee80211_sta *sta;
3865 int i;
3866 u32 msk = 0;
3867
3868 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3869 return;
3870
3871 mutex_lock(&mvm->mutex);
3872 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3873
3874 /* flush the AP-station and all TDLS peers */
3875 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3876 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3877 lockdep_is_held(&mvm->mutex));
3878 if (IS_ERR_OR_NULL(sta))
3879 continue;
3880
3881 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3882 if (mvmsta->vif != vif)
3883 continue;
3884
3885 /* make sure only TDLS peers or the AP are flushed */
3886 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3887
3888 msk |= mvmsta->tfd_queue_msk;
3889 }
3890
3891 if (drop) {
3892 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3893 IWL_ERR(mvm, "flush request failed\n");
3894 mutex_unlock(&mvm->mutex);
3895 } else {
3896 mutex_unlock(&mvm->mutex);
3897
3898 /* this can take a while, and we may need/want other operations
3899 * to succeed while doing this, so do it without the mutex held
3900 */
3901 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3902 }
3903 }
3904
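/* mac80211 get_survey handler: only a single global entry (idx 0) is
 * reported, and only when the firmware has the RADIO_BEACON_STATS
 * capability. Radio on/RX/TX/scan times are accumulated from the
 * firmware statistics and converted from usec to msec.
 */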
3905 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3906 struct survey_info *survey)
3907 {
3908 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3909 int ret;
3910
3911 memset(survey, 0, sizeof(*survey));
3912
3913 /* only support global statistics right now */
3914 if (idx != 0)
3915 return -ENOENT;
3916
3917 if (!fw_has_capa(&mvm->fw->ucode_capa,
3918 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3919 return -ENOENT;
3920
3921 mutex_lock(&mvm->mutex);
3922
3923 if (mvm->ucode_loaded) {
3924 ret = iwl_mvm_request_statistics(mvm, false);
3925 if (ret)
3926 goto out;
3927 }
3928
3929 survey->filled = SURVEY_INFO_TIME |
3930 SURVEY_INFO_TIME_RX |
3931 SURVEY_INFO_TIME_TX |
3932 SURVEY_INFO_TIME_SCAN;
3933 survey->time = mvm->accu_radio_stats.on_time_rf +
3934 mvm->radio_stats.on_time_rf;
3935 do_div(survey->time, USEC_PER_MSEC);
3936
3937 survey->time_rx = mvm->accu_radio_stats.rx_time +
3938 mvm->radio_stats.rx_time;
3939 do_div(survey->time_rx, USEC_PER_MSEC);
3940
3941 survey->time_tx = mvm->accu_radio_stats.tx_time +
3942 mvm->radio_stats.tx_time;
3943 do_div(survey->time_tx, USEC_PER_MSEC);
3944
3945 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3946 mvm->radio_stats.on_time_scan;
3947 do_div(survey->time_scan, USEC_PER_MSEC);
3948
3949 ret = 0;
3950 out:
3951 mutex_unlock(&mvm->mutex);
3952 return ret;
3953 }
3954
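/* mac80211 sta_statistics handler: report the number of beacons
 * received from the AP and their average signal, based on the firmware
 * beacon statistics. Only done for the AP station of an associated vif
 * for which the driver performs beacon filtering.
 */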
3955 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3956 struct ieee80211_vif *vif,
3957 struct ieee80211_sta *sta,
3958 struct station_info *sinfo)
3959 {
3960 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3961 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3962 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3963
3964 if (!fw_has_capa(&mvm->fw->ucode_capa,
3965 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3966 return;
3967
3968 /* if beacon filtering isn't on, mac80211 does it anyway */
3969 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
3970 return;
3971
3972 if (!vif->bss_conf.assoc)
3973 return;
3974
3975 mutex_lock(&mvm->mutex);
3976
3977 if (mvmvif->ap_sta_id != mvmsta->sta_id)
3978 goto unlock;
3979
3980 if (iwl_mvm_request_statistics(mvm, false))
3981 goto unlock;
3982
3983 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
3984 mvmvif->beacon_stats.accu_num_beacons;
3985 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
3986 if (mvmvif->beacon_stats.avg_signal) {
3987 /* firmware only reports a value after RXing a few beacons */
3988 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
3989 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
3990 }
3991 unlock:
3992 mutex_unlock(&mvm->mutex);
3993 }
3994
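/* Firmware debug trigger for MLME events: each event type has its own
 * countdown in the trigger TLV; when it expires (or is not set), a
 * firmware dump is collected with a short description of the
 * assoc/auth/deauth event.
 */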
3995 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
3996 struct ieee80211_vif *vif,
3997 const struct ieee80211_event *event)
3998 {
3999 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
4000 do { \
4001 if ((_cnt) && --(_cnt)) \
4002 break; \
4003 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4004 } while (0)
4005
4006 struct iwl_fw_dbg_trigger_tlv *trig;
4007 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4008
4009 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4010 return;
4011
4012 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4013 trig_mlme = (void *)trig->data;
4014 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4015 return;
4016
4017 if (event->u.mlme.data == ASSOC_EVENT) {
4018 if (event->u.mlme.status == MLME_DENIED)
4019 CHECK_MLME_TRIGGER(mvm, trig, buf,
4020 trig_mlme->stop_assoc_denied,
4021 "DENIED ASSOC: reason %d",
4022 event->u.mlme.reason);
4023 else if (event->u.mlme.status == MLME_TIMEOUT)
4024 CHECK_MLME_TRIGGER(mvm, trig, buf,
4025 trig_mlme->stop_assoc_timeout,
4026 "ASSOC TIMEOUT");
4027 } else if (event->u.mlme.data == AUTH_EVENT) {
4028 if (event->u.mlme.status == MLME_DENIED)
4029 CHECK_MLME_TRIGGER(mvm, trig, buf,
4030 trig_mlme->stop_auth_denied,
4031 "DENIED AUTH: reason %d",
4032 event->u.mlme.reason);
4033 else if (event->u.mlme.status == MLME_TIMEOUT)
4034 CHECK_MLME_TRIGGER(mvm, trig, buf,
4035 trig_mlme->stop_auth_timeout,
4036 "AUTH TIMEOUT");
4037 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4038 CHECK_MLME_TRIGGER(mvm, trig, buf,
4039 trig_mlme->stop_rx_deauth,
4040 "DEAUTH RX %d", event->u.mlme.reason);
4041 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4042 CHECK_MLME_TRIGGER(mvm, trig, buf,
4043 trig_mlme->stop_tx_deauth,
4044 "DEAUTH TX %d", event->u.mlme.reason);
4045 }
4046 #undef CHECK_MLME_TRIGGER
4047 }
4048
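/* Firmware debug trigger for a received BAR: collect a firmware dump
 * if the trigger's rx_bar bitmap includes the TID of the BAR.
 */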
4049 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4050 struct ieee80211_vif *vif,
4051 const struct ieee80211_event *event)
4052 {
4053 struct iwl_fw_dbg_trigger_tlv *trig;
4054 struct iwl_fw_dbg_trigger_ba *ba_trig;
4055
4056 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4057 return;
4058
4059 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4060 ba_trig = (void *)trig->data;
4061 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4062 return;
4063
4064 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4065 return;
4066
4067 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4068 "BAR received from %pM, tid %d, ssn %d",
4069 event->u.ba.sta->addr, event->u.ba.tid,
4070 event->u.ba.ssn);
4071 }
4072
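/* Firmware debug trigger for a frame timeout within a BA session:
 * collect a firmware dump if the trigger's frame_timeout bitmap
 * includes the affected TID.
 */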
4073 static void
4074 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4075 struct ieee80211_vif *vif,
4076 const struct ieee80211_event *event)
4077 {
4078 struct iwl_fw_dbg_trigger_tlv *trig;
4079 struct iwl_fw_dbg_trigger_ba *ba_trig;
4080
4081 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4082 return;
4083
4084 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4085 ba_trig = (void *)trig->data;
4086 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4087 return;
4088
4089 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4090 return;
4091
4092 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4093 "Frame from %pM timed out, tid %d",
4094 event->u.ba.sta->addr, event->u.ba.tid);
4095 }
4096
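/* mac80211 event_callback handler: fan out MLME, BAR RX and BA frame
 * timeout events to the firmware debug trigger handlers above.
 */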
4097 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4098 struct ieee80211_vif *vif,
4099 const struct ieee80211_event *event)
4100 {
4101 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4102
4103 switch (event->type) {
4104 case MLME_EVENT:
4105 iwl_mvm_event_mlme_callback(mvm, vif, event);
4106 break;
4107 case BAR_RX_EVENT:
4108 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4109 break;
4110 case BA_FRAME_TIMEOUT:
4111 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4112 break;
4113 default:
4114 break;
4115 }
4116 }
4117
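/* mac80211 callbacks implemented by the MVM op mode; the WoWLAN (D3)
 * handlers are only included when CONFIG_PM_SLEEP is set.
 */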
4118 const struct ieee80211_ops iwl_mvm_hw_ops = {
4119 .tx = iwl_mvm_mac_tx,
4120 .ampdu_action = iwl_mvm_mac_ampdu_action,
4121 .start = iwl_mvm_mac_start,
4122 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4123 .stop = iwl_mvm_mac_stop,
4124 .add_interface = iwl_mvm_mac_add_interface,
4125 .remove_interface = iwl_mvm_mac_remove_interface,
4126 .config = iwl_mvm_mac_config,
4127 .prepare_multicast = iwl_mvm_prepare_multicast,
4128 .configure_filter = iwl_mvm_configure_filter,
4129 .bss_info_changed = iwl_mvm_bss_info_changed,
4130 .hw_scan = iwl_mvm_mac_hw_scan,
4131 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4132 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4133 .sta_state = iwl_mvm_mac_sta_state,
4134 .sta_notify = iwl_mvm_mac_sta_notify,
4135 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4136 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4137 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4138 .sta_rc_update = iwl_mvm_sta_rc_update,
4139 .conf_tx = iwl_mvm_mac_conf_tx,
4140 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4141 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4142 .flush = iwl_mvm_mac_flush,
4143 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4144 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4145 .set_key = iwl_mvm_mac_set_key,
4146 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4147 .remain_on_channel = iwl_mvm_roc,
4148 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4149 .add_chanctx = iwl_mvm_add_chanctx,
4150 .remove_chanctx = iwl_mvm_remove_chanctx,
4151 .change_chanctx = iwl_mvm_change_chanctx,
4152 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4153 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4154 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4155
4156 .start_ap = iwl_mvm_start_ap_ibss,
4157 .stop_ap = iwl_mvm_stop_ap_ibss,
4158 .join_ibss = iwl_mvm_start_ap_ibss,
4159 .leave_ibss = iwl_mvm_stop_ap_ibss,
4160
4161 .set_tim = iwl_mvm_set_tim,
4162
4163 .channel_switch = iwl_mvm_channel_switch,
4164 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4165 .post_channel_switch = iwl_mvm_post_channel_switch,
4166
4167 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4168 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4169 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4170
4171 .event_callback = iwl_mvm_mac_event_callback,
4172
4173 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4174
4175 #ifdef CONFIG_PM_SLEEP
4176 /* look at d3.c */
4177 .suspend = iwl_mvm_suspend,
4178 .resume = iwl_mvm_resume,
4179 .set_wakeup = iwl_mvm_set_wakeup,
4180 .set_rekey_data = iwl_mvm_set_rekey_data,
4181 #if IS_ENABLED(CONFIG_IPV6)
4182 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4183 #endif
4184 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4185 #endif
4186 .get_survey = iwl_mvm_mac_get_survey,
4187 .sta_statistics = iwl_mvm_mac_sta_statistics,
4188 };