drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <linuxwifi@intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65 #include <net/mac80211.h>
66
67 #include "mvm.h"
68 #include "sta.h"
69 #include "rs.h"
70
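/*
 * Return the first station ID that is neither reserved nor already mapped
 * in fw_id_to_mac_id, or IWL_MVM_STATION_COUNT if no free slot is left.
 */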
71 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
72 enum nl80211_iftype iftype)
73 {
74 int sta_id;
75 u32 reserved_ids = 0;
76
77 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
78 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
79
80 lockdep_assert_held(&mvm->mutex);
81
82 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
83 if (iftype != NL80211_IFTYPE_STATION)
84 reserved_ids = BIT(0);
85
86 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
87 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
88 if (BIT(sta_id) & reserved_ids)
89 continue;
90
91 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
92 lockdep_is_held(&mvm->mutex)))
93 return sta_id;
94 }
95 return IWL_MVM_STATION_COUNT;
96 }
97
98 /* send station add/update command to firmware */
99 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
100 bool update)
101 {
102 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
103 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
104 .sta_id = mvm_sta->sta_id,
105 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
106 .add_modify = update ? 1 : 0,
107 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
108 STA_FLG_MIMO_EN_MSK),
109 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
110 };
111 int ret;
112 u32 status;
113 u32 agg_size = 0, mpdu_dens = 0;
114
115 if (!update) {
116 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
117 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
118 }
119
120 switch (sta->bandwidth) {
121 case IEEE80211_STA_RX_BW_160:
122 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
123 /* fall through */
124 case IEEE80211_STA_RX_BW_80:
125 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
126 /* fall through */
127 case IEEE80211_STA_RX_BW_40:
128 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
129 /* fall through */
130 case IEEE80211_STA_RX_BW_20:
131 if (sta->ht_cap.ht_supported)
132 add_sta_cmd.station_flags |=
133 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
134 break;
135 }
136
137 switch (sta->rx_nss) {
138 case 1:
139 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
140 break;
141 case 2:
142 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
143 break;
144 case 3 ... 8:
145 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
146 break;
147 }
148
149 switch (sta->smps_mode) {
150 case IEEE80211_SMPS_AUTOMATIC:
151 case IEEE80211_SMPS_NUM_MODES:
152 WARN_ON(1);
153 break;
154 case IEEE80211_SMPS_STATIC:
155 /* override NSS */
156 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
157 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
158 break;
159 case IEEE80211_SMPS_DYNAMIC:
160 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
161 break;
162 case IEEE80211_SMPS_OFF:
163 /* nothing */
164 break;
165 }
166
167 if (sta->ht_cap.ht_supported) {
168 add_sta_cmd.station_flags_msk |=
169 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
170 STA_FLG_AGG_MPDU_DENS_MSK);
171
172 mpdu_dens = sta->ht_cap.ampdu_density;
173 }
174
175 if (sta->vht_cap.vht_supported) {
176 agg_size = sta->vht_cap.cap &
177 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
178 agg_size >>=
179 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
180 } else if (sta->ht_cap.ht_supported) {
181 agg_size = sta->ht_cap.ampdu_factor;
182 }
183
184 add_sta_cmd.station_flags |=
185 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
186 add_sta_cmd.station_flags |=
187 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
188
189 status = ADD_STA_SUCCESS;
190 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
191 &add_sta_cmd, &status);
192 if (ret)
193 return ret;
194
195 switch (status) {
196 case ADD_STA_SUCCESS:
197 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
198 break;
199 default:
200 ret = -EIO;
201 IWL_ERR(mvm, "ADD_STA failed\n");
202 break;
203 }
204
205 return ret;
206 }
207
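/*
 * Allocate one hardware TX queue per AC for a TDLS station, enable the
 * queues and record them in the station's TFD queue mask.
 */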
208 static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
209 struct ieee80211_sta *sta)
210 {
211 unsigned long used_hw_queues;
212 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
213 unsigned int wdg_timeout =
214 iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
215 u32 ac;
216
217 lockdep_assert_held(&mvm->mutex);
218
219 used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
220
221 /* Find available queues, and allocate them to the ACs */
222 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
223 u8 queue = find_first_zero_bit(&used_hw_queues,
224 mvm->first_agg_queue);
225
226 if (queue >= mvm->first_agg_queue) {
227 IWL_ERR(mvm, "Failed to allocate STA queue\n");
228 return -EBUSY;
229 }
230
231 __set_bit(queue, &used_hw_queues);
232 mvmsta->hw_queue[ac] = queue;
233 }
234
235 /* Found a place for all queues - enable them */
236 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
237 iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
238 mvmsta->hw_queue[ac],
239 iwl_mvm_ac_to_tx_fifo[ac], 0,
240 wdg_timeout);
241 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
242 }
243
244 return 0;
245 }
246
247 static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
248 struct ieee80211_sta *sta)
249 {
250 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
251 unsigned long sta_msk;
252 int i;
253
254 lockdep_assert_held(&mvm->mutex);
255
256 /* disable the TDLS STA-specific queues */
257 sta_msk = mvmsta->tfd_queue_msk;
258 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
259 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
260 }
261
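/*
 * Add a station: pick a free station ID (or reuse the previous one on HW
 * restart), set up its TX queues (TDLS peers get dedicated queues) and
 * send the ADD_STA command to the firmware.
 */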
262 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
263 struct ieee80211_vif *vif,
264 struct ieee80211_sta *sta)
265 {
266 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
267 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
268 int i, ret, sta_id;
269
270 lockdep_assert_held(&mvm->mutex);
271
272 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
273 sta_id = iwl_mvm_find_free_sta_id(mvm,
274 ieee80211_vif_type_p2p(vif));
275 else
276 sta_id = mvm_sta->sta_id;
277
278 if (sta_id == IWL_MVM_STATION_COUNT)
279 return -ENOSPC;
280
281 spin_lock_init(&mvm_sta->lock);
282
283 mvm_sta->sta_id = sta_id;
284 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
285 mvmvif->color);
286 mvm_sta->vif = vif;
287 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
288 mvm_sta->tx_protection = 0;
289 mvm_sta->tt_tx_protection = false;
290
291 /* HW restart, don't assume the memory has been zeroed */
292 atomic_set(&mvm->pending_frames[sta_id], 0);
293 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
294 mvm_sta->tfd_queue_msk = 0;
295
296 /* allocate new queues for a TDLS station */
297 if (sta->tdls) {
298 ret = iwl_mvm_tdls_sta_init(mvm, sta);
299 if (ret)
300 return ret;
301 } else {
302 for (i = 0; i < IEEE80211_NUM_ACS; i++)
303 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
304 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
305 }
306
307 /* for HW restart - reset everything but the sequence number */
308 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
309 u16 seq = mvm_sta->tid_data[i].seq_number;
310 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
311 mvm_sta->tid_data[i].seq_number = seq;
312 }
313 mvm_sta->agg_tids = 0;
314
315 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
316 if (ret)
317 goto err;
318
319 if (vif->type == NL80211_IFTYPE_STATION) {
320 if (!sta->tdls) {
321 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
322 mvmvif->ap_sta_id = sta_id;
323 } else {
324 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
325 }
326 }
327
328 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
329
330 return 0;
331
332 err:
333 iwl_mvm_tdls_sta_deinit(mvm, sta);
334 return ret;
335 }
336
337 int iwl_mvm_update_sta(struct iwl_mvm *mvm,
338 struct ieee80211_vif *vif,
339 struct ieee80211_sta *sta)
340 {
341 return iwl_mvm_sta_send_to_fw(mvm, sta, true);
342 }
343
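/*
 * Ask the firmware to start or stop draining the frames pending for this
 * station by toggling STA_FLG_DRAIN_FLOW.
 */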
344 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
345 bool drain)
346 {
347 struct iwl_mvm_add_sta_cmd cmd = {};
348 int ret;
349 u32 status;
350
351 lockdep_assert_held(&mvm->mutex);
352
353 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
354 cmd.sta_id = mvmsta->sta_id;
355 cmd.add_modify = STA_MODE_MODIFY;
356 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
357 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
358
359 status = ADD_STA_SUCCESS;
360 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
361 &cmd, &status);
362 if (ret)
363 return ret;
364
365 switch (status) {
366 case ADD_STA_SUCCESS:
367 		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
368 mvmsta->sta_id);
369 break;
370 default:
371 ret = -EIO;
372 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
373 mvmsta->sta_id);
374 break;
375 }
376
377 return ret;
378 }
379
380 /*
381  * Remove a station from the FW table. Before sending the command to remove
382  * the station, validate that the station is indeed known to the driver
383  * (sanity check only).
384 */
385 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
386 {
387 struct ieee80211_sta *sta;
388 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
389 .sta_id = sta_id,
390 };
391 int ret;
392
393 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
394 lockdep_is_held(&mvm->mutex));
395
396 /* Note: internal stations are marked as error values */
397 if (!sta) {
398 IWL_ERR(mvm, "Invalid station id\n");
399 return -EINVAL;
400 }
401
402 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
403 sizeof(rm_sta_cmd), &rm_sta_cmd);
404 if (ret) {
405 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
406 return ret;
407 }
408
409 return 0;
410 }
411
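/*
 * Worker that removes from the firmware every station whose pending frames
 * have finished draining (marked -EBUSY in fw_id_to_mac_id) and disables
 * any TDLS queues that were left for draining.
 */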
412 void iwl_mvm_sta_drained_wk(struct work_struct *wk)
413 {
414 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
415 u8 sta_id;
416
417 /*
418 * The mutex is needed not only because of the SYNC cmd: if this work
419 * ran concurrently with iwl_mvm_rm_sta, it could run before
420 * iwl_mvm_rm_sta marks the station as busy and then exit. iwl_mvm_rm_sta
421 * would then mark the station as busy, and nobody would ever clean it
422 * up afterwards.
423 */
424 mutex_lock(&mvm->mutex);
425
426 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
427 int ret;
428 struct ieee80211_sta *sta =
429 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
430 lockdep_is_held(&mvm->mutex));
431
432 /*
433 * This station is in use or RCU-removed; the latter happens in
434 * managed mode, where mac80211 removes the station before we
435 * can remove it from firmware (we can only do that after the
436 * MAC is marked unassociated), and possibly while the deauth
437 * frame to disconnect from the AP is still queued. Then, the
438 * station pointer is -ENOENT when the last skb is reclaimed.
439 */
440 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
441 continue;
442
443 if (PTR_ERR(sta) == -EINVAL) {
444 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
445 sta_id);
446 continue;
447 }
448
449 if (!sta) {
450 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
451 sta_id);
452 continue;
453 }
454
455 WARN_ON(PTR_ERR(sta) != -EBUSY);
456 /* This station was removed and we waited until it got drained;
457 * we can now proceed and remove it.
458 */
459 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
460 if (ret) {
461 IWL_ERR(mvm,
462 "Couldn't remove sta %d after it was drained\n",
463 sta_id);
464 continue;
465 }
466 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
467 clear_bit(sta_id, mvm->sta_drained);
468
469 if (mvm->tfd_drained[sta_id]) {
470 unsigned long i, msk = mvm->tfd_drained[sta_id];
471
472 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
473 iwl_mvm_disable_txq(mvm, i, i,
474 IWL_MAX_TID_COUNT, 0);
475
476 mvm->tfd_drained[sta_id] = 0;
477 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
478 sta_id, msk);
479 }
480 }
481
482 mutex_unlock(&mvm->mutex);
483 }
484
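/*
 * Remove a station. For the AP station of a client interface, drain and
 * flush its queues first. If frames are still pending for the station,
 * mark it -EBUSY and let the drained worker remove it from the firmware;
 * otherwise remove it right away.
 */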
485 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
486 struct ieee80211_vif *vif,
487 struct ieee80211_sta *sta)
488 {
489 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
490 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
491 int ret;
492
493 lockdep_assert_held(&mvm->mutex);
494
495 if (vif->type == NL80211_IFTYPE_STATION &&
496 mvmvif->ap_sta_id == mvm_sta->sta_id) {
497 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
498 if (ret)
499 return ret;
500 /* flush its queues here since we are freeing mvm_sta */
501 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
502 if (ret)
503 return ret;
504 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
505 mvm_sta->tfd_queue_msk);
506 if (ret)
507 return ret;
508 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
509
510 /* if we are associated - we can't remove the AP STA now */
511 if (vif->bss_conf.assoc)
512 return ret;
513
514 /* unassoc - go ahead - remove the AP STA now */
515 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
516
517 /* clear d0i3_ap_sta_id if no longer relevant */
518 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
519 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
520 }
521
522 /*
523 * This shouldn't happen - the TDLS channel switch should be canceled
524 * before the STA is removed.
525 */
526 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
527 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
528 cancel_delayed_work(&mvm->tdls_cs.dwork);
529 }
530
531 /*
532 * Make sure that the tx response code sees the station as -EBUSY and
533 * calls the drain worker.
534 */
535 spin_lock_bh(&mvm_sta->lock);
536 /*
537 * There are frames pending on the AC queues for this station.
538 * We need to wait until all the frames are drained...
539 */
540 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
541 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
542 ERR_PTR(-EBUSY));
543 spin_unlock_bh(&mvm_sta->lock);
544
545 /* disable TDLS sta queues on drain complete */
546 if (sta->tdls) {
547 mvm->tfd_drained[mvm_sta->sta_id] =
548 mvm_sta->tfd_queue_msk;
549 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
550 mvm_sta->sta_id);
551 }
552
553 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
554 } else {
555 spin_unlock_bh(&mvm_sta->lock);
556
557 if (sta->tdls)
558 iwl_mvm_tdls_sta_deinit(mvm, sta);
559
560 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
561 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
562 }
563
564 return ret;
565 }
566
567 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
568 struct ieee80211_vif *vif,
569 u8 sta_id)
570 {
571 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
572
573 lockdep_assert_held(&mvm->mutex);
574
575 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
576 return ret;
577 }
578
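/*
 * Reserve a station ID and TFD queue mask for an internal (driver-managed)
 * station; its fw_id_to_mac_id entry is set to an error pointer so the
 * slot is not considered free.
 */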
579 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
580 struct iwl_mvm_int_sta *sta,
581 u32 qmask, enum nl80211_iftype iftype)
582 {
583 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
584 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
585 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
586 return -ENOSPC;
587 }
588
589 sta->tfd_queue_msk = qmask;
590
591 /* put a non-NULL value so iterating over the stations won't stop */
592 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
593 return 0;
594 }
595
596 static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
597 struct iwl_mvm_int_sta *sta)
598 {
599 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
600 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
601 sta->sta_id = IWL_MVM_STATION_COUNT;
602 }
603
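/* Send the ADD_STA command for an internal station (aux, sniffer or
 * broadcast), optionally with a MAC address. */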
604 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
605 struct iwl_mvm_int_sta *sta,
606 const u8 *addr,
607 u16 mac_id, u16 color)
608 {
609 struct iwl_mvm_add_sta_cmd cmd;
610 int ret;
611 u32 status;
612
613 lockdep_assert_held(&mvm->mutex);
614
615 memset(&cmd, 0, sizeof(cmd));
616 cmd.sta_id = sta->sta_id;
617 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
618 color));
619
620 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
621 cmd.tid_disable_tx = cpu_to_le16(0xffff);
622
623 if (addr)
624 memcpy(cmd.addr, addr, ETH_ALEN);
625
626 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
627 &cmd, &status);
628 if (ret)
629 return ret;
630
631 switch (status) {
632 case ADD_STA_SUCCESS:
633 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
634 return 0;
635 default:
636 ret = -EIO;
637 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
638 status);
639 break;
640 }
641 return ret;
642 }
643
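/* Map the aux queue to its FIFO, then allocate the aux station and add it
 * to the firmware. */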
644 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
645 {
646 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
647 mvm->cfg->base_params->wd_timeout :
648 IWL_WATCHDOG_DISABLED;
649 int ret;
650
651 lockdep_assert_held(&mvm->mutex);
652
653 /* Map Aux queue to fifo - needs to happen before adding Aux station */
654 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
655 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
656
657 /* Allocate aux station and assign to it the aux queue */
658 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
659 NL80211_IFTYPE_UNSPECIFIED);
660 if (ret)
661 return ret;
662
663 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
664 MAC_INDEX_AUX, 0);
665
666 if (ret)
667 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
668 return ret;
669 }
670
671 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
672 {
673 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
674
675 lockdep_assert_held(&mvm->mutex);
676 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
677 mvmvif->id, 0);
678 }
679
680 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
681 {
682 int ret;
683
684 lockdep_assert_held(&mvm->mutex);
685
686 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
687 if (ret)
688 IWL_WARN(mvm, "Failed sending remove station\n");
689
690 return ret;
691 }
692
693 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
694 {
695 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
696 }
697
698 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
699 {
700 lockdep_assert_held(&mvm->mutex);
701
702 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
703 }
704
705 /*
706 * Send the add station command for the vif's broadcast station.
707 * Assumes that the station was already allocated.
708 *
709 * @mvm: the mvm component
710 * @vif: the interface whose broadcast station (mvmvif->bcast_sta) is added
712 */
713 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
714 {
715 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
716 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
717 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
718 const u8 *baddr = _baddr;
719
720 lockdep_assert_held(&mvm->mutex);
721
722 if (vif->type == NL80211_IFTYPE_ADHOC)
723 baddr = vif->bss_conf.bssid;
724
725 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
726 return -ENOSPC;
727
728 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
729 mvmvif->id, mvmvif->color);
730 }
731
732 /* Send the FW a request to remove the station from its internal data
733 * structures, but DO NOT remove the entry from the local data structures. */
734 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
735 {
736 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
737 int ret;
738
739 lockdep_assert_held(&mvm->mutex);
740
741 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
742 if (ret)
743 IWL_WARN(mvm, "Failed sending remove station\n");
744 return ret;
745 }
746
747 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
748 {
749 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
750 u32 qmask;
751
752 lockdep_assert_held(&mvm->mutex);
753
754 qmask = iwl_mvm_mac_get_queues_mask(vif);
755
756 /*
757 * The firmware defines the TFD queue mask to only be relevant
758 * for *unicast* queues, so the multicast (CAB) queue shouldn't
759 * be included.
760 */
761 if (vif->type == NL80211_IFTYPE_AP)
762 qmask &= ~BIT(vif->cab_queue);
763
764 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
765 ieee80211_vif_type_p2p(vif));
766 }
767
768 /* Allocate a new station entry for the given vif's broadcast station
769 * and send it to the FW.
770 * Note that each P2P mac should have its own broadcast station.
771 *
772 * @mvm: the mvm component
773 * @vif: the interface whose broadcast station (mvmvif->bcast_sta) is
774 *	 allocated and added. */
775 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
776 {
777 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
778 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
779 int ret;
780
781 lockdep_assert_held(&mvm->mutex);
782
783 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
784 if (ret)
785 return ret;
786
787 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
788
789 if (ret)
790 iwl_mvm_dealloc_int_sta(mvm, bsta);
791
792 return ret;
793 }
794
795 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
796 {
797 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
798
799 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
800 }
801
802 /*
803 * Send the FW a request to remove the station from its internal data
804 * structures, and in addition remove it from the local data structure.
805 */
806 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
807 {
808 int ret;
809
810 lockdep_assert_held(&mvm->mutex);
811
812 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
813
814 iwl_mvm_dealloc_bcast_sta(mvm, vif);
815
816 return ret;
817 }
818
819 #define IWL_MAX_RX_BA_SESSIONS 16
820
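/* Start or stop an RX BlockAck session for the given station and TID,
 * enforcing the IWL_MAX_RX_BA_SESSIONS limit. */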
821 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
822 int tid, u16 ssn, bool start)
823 {
824 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
825 struct iwl_mvm_add_sta_cmd cmd = {};
826 int ret;
827 u32 status;
828
829 lockdep_assert_held(&mvm->mutex);
830
831 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
832 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
833 return -ENOSPC;
834 }
835
836 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
837 cmd.sta_id = mvm_sta->sta_id;
838 cmd.add_modify = STA_MODE_MODIFY;
839 if (start) {
840 cmd.add_immediate_ba_tid = (u8) tid;
841 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
842 } else {
843 cmd.remove_immediate_ba_tid = (u8) tid;
844 }
845 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
846 STA_MODIFY_REMOVE_BA_TID;
847
848 status = ADD_STA_SUCCESS;
849 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
850 &cmd, &status);
851 if (ret)
852 return ret;
853
854 switch (status) {
855 case ADD_STA_SUCCESS:
856 IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
857 start ? "start" : "stopp");
858 break;
859 case ADD_STA_IMMEDIATE_BA_FAILURE:
860 IWL_WARN(mvm, "RX BA Session refused by fw\n");
861 ret = -ENOSPC;
862 break;
863 default:
864 ret = -EIO;
865 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
866 start ? "start" : "stopp", status);
867 break;
868 }
869
870 if (!ret) {
871 if (start)
872 mvm->rx_ba_sessions++;
873 else if (mvm->rx_ba_sessions > 0)
874 /* check that restart flow didn't zero the counter */
875 mvm->rx_ba_sessions--;
876 }
877
878 return ret;
879 }
880
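/*
 * Update the firmware's view of the station for a TX aggregation session:
 * add or remove the aggregation queue in its queue mask and enable or
 * disable aggregation on the TID.
 */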
881 static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
882 int tid, u8 queue, bool start)
883 {
884 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
885 struct iwl_mvm_add_sta_cmd cmd = {};
886 int ret;
887 u32 status;
888
889 lockdep_assert_held(&mvm->mutex);
890
891 if (start) {
892 mvm_sta->tfd_queue_msk |= BIT(queue);
893 mvm_sta->tid_disable_agg &= ~BIT(tid);
894 } else {
895 mvm_sta->tfd_queue_msk &= ~BIT(queue);
896 mvm_sta->tid_disable_agg |= BIT(tid);
897 }
898
899 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
900 cmd.sta_id = mvm_sta->sta_id;
901 cmd.add_modify = STA_MODE_MODIFY;
902 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
903 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
904 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
905
906 status = ADD_STA_SUCCESS;
907 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
908 &cmd, &status);
909 if (ret)
910 return ret;
911
912 switch (status) {
913 case ADD_STA_SUCCESS:
914 break;
915 default:
916 ret = -EIO;
917 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
918 start ? "start" : "stopp", status);
919 break;
920 }
921
922 return ret;
923 }
924
925 const u8 tid_to_mac80211_ac[] = {
926 IEEE80211_AC_BE,
927 IEEE80211_AC_BK,
928 IEEE80211_AC_BK,
929 IEEE80211_AC_BE,
930 IEEE80211_AC_VI,
931 IEEE80211_AC_VI,
932 IEEE80211_AC_VO,
933 IEEE80211_AC_VO,
934 };
935
936 static const u8 tid_to_ucode_ac[] = {
937 AC_BE,
938 AC_BK,
939 AC_BK,
940 AC_BE,
941 AC_VI,
942 AC_VI,
943 AC_VO,
944 AC_VO,
945 };
946
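/*
 * Start a TX aggregation session: reserve an aggregation queue and record
 * the starting SSN; if frames are still pending below that SSN, wait for
 * the queue to drain before telling mac80211 the session may proceed.
 */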
947 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
948 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
949 {
950 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
951 struct iwl_mvm_tid_data *tid_data;
952 int txq_id;
953 int ret;
954
955 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
956 return -EINVAL;
957
958 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
959 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
960 mvmsta->tid_data[tid].state);
961 return -ENXIO;
962 }
963
964 lockdep_assert_held(&mvm->mutex);
965
966 spin_lock_bh(&mvmsta->lock);
967
968 /* possible race condition - we entered D0i3 while starting agg */
969 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
970 spin_unlock_bh(&mvmsta->lock);
971 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
972 return -EIO;
973 }
974
975 spin_lock_bh(&mvm->queue_info_lock);
976
977 txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
978 mvm->last_agg_queue);
979 if (txq_id < 0) {
980 ret = txq_id;
981 spin_unlock_bh(&mvm->queue_info_lock);
982 IWL_ERR(mvm, "Failed to allocate agg queue\n");
983 goto release_locks;
984 }
985 mvm->queue_info[txq_id].setup_reserved = true;
986 spin_unlock_bh(&mvm->queue_info_lock);
987
988 tid_data = &mvmsta->tid_data[tid];
989 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
990 tid_data->txq_id = txq_id;
991 *ssn = tid_data->ssn;
992
993 IWL_DEBUG_TX_QUEUES(mvm,
994 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
995 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
996 tid_data->next_reclaimed);
997
998 if (tid_data->ssn == tid_data->next_reclaimed) {
999 tid_data->state = IWL_AGG_STARTING;
1000 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1001 } else {
1002 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1003 }
1004
1005 ret = 0;
1006
1007 release_locks:
1008 spin_unlock_bh(&mvmsta->lock);
1009
1010 return ret;
1011 }
1012
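/*
 * Make a TX aggregation session operational: enable the aggregation queue,
 * update the station in the firmware, cap the aggregation frame limit at
 * the negotiated buffer size and update the LQ (rate scaling) command.
 */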
1013 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1014 struct ieee80211_sta *sta, u16 tid, u8 buf_size)
1015 {
1016 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1017 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1018 unsigned int wdg_timeout =
1019 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
1020 int queue, fifo, ret;
1021 u16 ssn;
1022
1023 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
1024 != IWL_MAX_TID_COUNT);
1025
1026 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
1027
1028 spin_lock_bh(&mvmsta->lock);
1029 ssn = tid_data->ssn;
1030 queue = tid_data->txq_id;
1031 tid_data->state = IWL_AGG_ON;
1032 mvmsta->agg_tids |= BIT(tid);
1033 tid_data->ssn = 0xffff;
1034 spin_unlock_bh(&mvmsta->lock);
1035
1036 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1037
1038 iwl_mvm_enable_agg_txq(mvm, queue,
1039 vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
1040 mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
1041
1042 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1043 if (ret)
1044 return -EIO;
1045
1046 /* No need to mark as reserved */
1047 spin_lock_bh(&mvm->queue_info_lock);
1048 mvm->queue_info[queue].setup_reserved = false;
1049 spin_unlock_bh(&mvm->queue_info_lock);
1050
1051 /*
1052 * Even though in theory the peer could have different
1053 * aggregation reorder buffer sizes for different sessions,
1054 * our ucode doesn't allow for that and has a global limit
1055 * for each station. Therefore, use the minimum of all the
1056 * aggregation sessions and our default value.
1057 */
1058 mvmsta->max_agg_bufsize =
1059 min(mvmsta->max_agg_bufsize, buf_size);
1060 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
1061
1062 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
1063 sta->addr, tid);
1064
1065 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
1066 }
1067
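/*
 * Stop a TX aggregation session; if packets for this RA/TID are still in
 * the HW queue, defer the teardown until the queue empties.
 */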
1068 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1069 struct ieee80211_sta *sta, u16 tid)
1070 {
1071 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1072 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1073 u16 txq_id;
1074 int err;
1075
1076
1077 /*
1078 * If mac80211 is cleaning its state, then say that we finished since
1079 * our state has been cleared anyway.
1080 */
1081 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1082 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1083 return 0;
1084 }
1085
1086 spin_lock_bh(&mvmsta->lock);
1087
1088 txq_id = tid_data->txq_id;
1089
1090 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
1091 mvmsta->sta_id, tid, txq_id, tid_data->state);
1092
1093 mvmsta->agg_tids &= ~BIT(tid);
1094
1095 /* No need to mark as reserved anymore */
1096 spin_lock_bh(&mvm->queue_info_lock);
1097 mvm->queue_info[txq_id].setup_reserved = false;
1098 spin_unlock_bh(&mvm->queue_info_lock);
1099
1100 switch (tid_data->state) {
1101 case IWL_AGG_ON:
1102 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1103
1104 IWL_DEBUG_TX_QUEUES(mvm,
1105 "ssn = %d, next_recl = %d\n",
1106 tid_data->ssn, tid_data->next_reclaimed);
1107
1108 /* There are still packets for this RA / TID in the HW */
1109 if (tid_data->ssn != tid_data->next_reclaimed) {
1110 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
1111 err = 0;
1112 break;
1113 }
1114
1115 tid_data->ssn = 0xffff;
1116 tid_data->state = IWL_AGG_OFF;
1117 spin_unlock_bh(&mvmsta->lock);
1118
1119 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1120
1121 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1122
1123 iwl_mvm_disable_txq(mvm, txq_id,
1124 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1125 0);
1126 return 0;
1127 case IWL_AGG_STARTING:
1128 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1129 /*
1130 * The agg session has been stopped before it was set up. This
1131 * can happen when the AddBA timer times out for example.
1132 */
1133
1134 /* No barriers since we are under mutex */
1135 lockdep_assert_held(&mvm->mutex);
1136
1137 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1138 tid_data->state = IWL_AGG_OFF;
1139 err = 0;
1140 break;
1141 default:
1142 IWL_ERR(mvm,
1143 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
1144 mvmsta->sta_id, tid, tid_data->state);
1145 IWL_ERR(mvm,
1146 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
1147 err = -EINVAL;
1148 }
1149
1150 spin_unlock_bh(&mvmsta->lock);
1151
1152 return err;
1153 }
1154
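/*
 * Forcefully tear down a TX aggregation session: clear the aggregation
 * state, and if the session was operational, drain and flush its queue
 * before disabling it.
 */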
1155 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1156 struct ieee80211_sta *sta, u16 tid)
1157 {
1158 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1159 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1160 u16 txq_id;
1161 enum iwl_mvm_agg_state old_state;
1162
1163 /*
1164 * First set the agg state to OFF to avoid calling
1165 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
1166 */
1167 spin_lock_bh(&mvmsta->lock);
1168 txq_id = tid_data->txq_id;
1169 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
1170 mvmsta->sta_id, tid, txq_id, tid_data->state);
1171 old_state = tid_data->state;
1172 tid_data->state = IWL_AGG_OFF;
1173 mvmsta->agg_tids &= ~BIT(tid);
1174 spin_unlock_bh(&mvmsta->lock);
1175
1176 /* No need to mark as reserved */
1177 spin_lock_bh(&mvm->queue_info_lock);
1178 mvm->queue_info[txq_id].setup_reserved = false;
1179 spin_unlock_bh(&mvm->queue_info_lock);
1180
1181 if (old_state >= IWL_AGG_ON) {
1182 iwl_mvm_drain_sta(mvm, mvmsta, true);
1183 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
1184 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
1185 iwl_trans_wait_tx_queue_empty(mvm->trans,
1186 mvmsta->tfd_queue_msk);
1187 iwl_mvm_drain_sta(mvm, mvmsta, false);
1188
1189 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1190
1191 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
1192 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1193 0);
1194 }
1195
1196 return 0;
1197 }
1198
1199 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1200 {
1201 int i, max = -1, max_offs = -1;
1202
1203 lockdep_assert_held(&mvm->mutex);
1204
1205 /* Pick the unused key offset with the highest 'deleted'
1206 * counter. Every time a key is deleted, all the counters
1207 * are incremented and the one that was just deleted is
1208 * reset to zero. Thus, the highest counter is the one
1209 * that was deleted longest ago. Pick that one.
1210 */
1211 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
1212 if (test_bit(i, mvm->fw_key_table))
1213 continue;
1214 if (mvm->fw_key_deleted[i] > max) {
1215 max = mvm->fw_key_deleted[i];
1216 max_offs = i;
1217 }
1218 }
1219
1220 if (max_offs < 0)
1221 return STA_KEY_IDX_INVALID;
1222
1223 return max_offs;
1224 }
1225
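/*
 * Return the iwl_mvm_sta a key should be programmed for: the given station
 * if any, or the AP station when installing a GTK on a client interface.
 */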
1226 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
1227 struct ieee80211_vif *vif,
1228 struct ieee80211_sta *sta)
1229 {
1230 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1231
1232 if (sta)
1233 return iwl_mvm_sta_from_mac80211(sta);
1234
1235 /*
1236 * The device expects GTKs for station interfaces to be
1237 * installed as GTKs for the AP station. If we have no
1238 * station ID, then use AP's station ID.
1239 */
1240 if (vif->type == NL80211_IFTYPE_STATION &&
1241 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1242 u8 sta_id = mvmvif->ap_sta_id;
1243
1244 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
1245 lockdep_is_held(&mvm->mutex));
1246 /*
1247 * It is possible that the 'sta' parameter is NULL,
1248 * for example when a GTK is removed - the sta_id will then
1249 * be the AP ID, and no station was passed by mac80211.
1250 */
1251 if (IS_ERR_OR_NULL(sta))
1252 return NULL;
1253
1254 return iwl_mvm_sta_from_mac80211(sta);
1255 }
1256
1257 return NULL;
1258 }
1259
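/*
 * Build and send an ADD_STA_KEY command installing the given key material
 * (TKIP, CCMP, WEP or other) at the requested key offset for the station.
 */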
1260 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1261 struct iwl_mvm_sta *mvm_sta,
1262 struct ieee80211_key_conf *keyconf, bool mcast,
1263 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
1264 u8 key_offset)
1265 {
1266 struct iwl_mvm_add_sta_key_cmd cmd = {};
1267 __le16 key_flags;
1268 int ret;
1269 u32 status;
1270 u16 keyidx;
1271 int i;
1272 u8 sta_id = mvm_sta->sta_id;
1273
1274 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1275 STA_KEY_FLG_KEYID_MSK;
1276 key_flags = cpu_to_le16(keyidx);
1277 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
1278
1279 switch (keyconf->cipher) {
1280 case WLAN_CIPHER_SUITE_TKIP:
1281 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
1282 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
1283 for (i = 0; i < 5; i++)
1284 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
1285 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1286 break;
1287 case WLAN_CIPHER_SUITE_CCMP:
1288 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
1289 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1290 break;
1291 case WLAN_CIPHER_SUITE_WEP104:
1292 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
1293 /* fall through */
1294 case WLAN_CIPHER_SUITE_WEP40:
1295 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
1296 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
1297 break;
1298 default:
1299 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
1300 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1301 }
1302
1303 if (mcast)
1304 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1305
1306 cmd.key_offset = key_offset;
1307 cmd.key_flags = key_flags;
1308 cmd.sta_id = sta_id;
1309
1310 status = ADD_STA_SUCCESS;
1311 if (cmd_flags & CMD_ASYNC)
1312 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
1313 sizeof(cmd), &cmd);
1314 else
1315 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1316 &cmd, &status);
1317
1318 switch (status) {
1319 case ADD_STA_SUCCESS:
1320 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
1321 break;
1322 default:
1323 ret = -EIO;
1324 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
1325 break;
1326 }
1327
1328 return ret;
1329 }
1330
1331 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1332 struct ieee80211_key_conf *keyconf,
1333 u8 sta_id, bool remove_key)
1334 {
1335 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
1336
1337 /* verify the key details match the required command's expectations */
1338 if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
1339 (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
1340 (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
1341 return -EINVAL;
1342
1343 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
1344 igtk_cmd.sta_id = cpu_to_le32(sta_id);
1345
1346 if (remove_key) {
1347 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
1348 } else {
1349 struct ieee80211_key_seq seq;
1350 const u8 *pn;
1351
1352 memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
1353 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1354 pn = seq.aes_cmac.pn;
1355 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
1356 ((u64) pn[4] << 8) |
1357 ((u64) pn[3] << 16) |
1358 ((u64) pn[2] << 24) |
1359 ((u64) pn[1] << 32) |
1360 ((u64) pn[0] << 40));
1361 }
1362
1363 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
1364 remove_key ? "removing" : "installing",
1365 igtk_cmd.sta_id);
1366
1367 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1368 sizeof(igtk_cmd), &igtk_cmd);
1369 }
1370
1371
1372 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
1373 struct ieee80211_vif *vif,
1374 struct ieee80211_sta *sta)
1375 {
1376 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1377
1378 if (sta)
1379 return sta->addr;
1380
1381 if (vif->type == NL80211_IFTYPE_STATION &&
1382 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1383 u8 sta_id = mvmvif->ap_sta_id;
1384 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1385 lockdep_is_held(&mvm->mutex));
1386 return sta->addr;
1387 }
1388
1389
1390 return NULL;
1391 }
1392
1393 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1394 struct ieee80211_vif *vif,
1395 struct ieee80211_sta *sta,
1396 struct ieee80211_key_conf *keyconf,
1397 u8 key_offset,
1398 bool mcast)
1399 {
1400 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1401 int ret;
1402 const u8 *addr;
1403 struct ieee80211_key_seq seq;
1404 u16 p1k[5];
1405
1406 switch (keyconf->cipher) {
1407 case WLAN_CIPHER_SUITE_TKIP:
1408 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
1409 /* get phase 1 key from mac80211 */
1410 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1411 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1412 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1413 seq.tkip.iv32, p1k, 0, key_offset);
1414 break;
1415 case WLAN_CIPHER_SUITE_CCMP:
1416 case WLAN_CIPHER_SUITE_WEP40:
1417 case WLAN_CIPHER_SUITE_WEP104:
1418 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1419 0, NULL, 0, key_offset);
1420 break;
1421 default:
1422 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1423 0, NULL, 0, key_offset);
1424 }
1425
1426 return ret;
1427 }
1428
1429 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
1430 struct ieee80211_key_conf *keyconf,
1431 bool mcast)
1432 {
1433 struct iwl_mvm_add_sta_key_cmd cmd = {};
1434 __le16 key_flags;
1435 int ret;
1436 u32 status;
1437
1438 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1439 STA_KEY_FLG_KEYID_MSK);
1440 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
1441 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
1442
1443 if (mcast)
1444 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1445
1446 cmd.key_flags = key_flags;
1447 cmd.key_offset = keyconf->hw_key_idx;
1448 cmd.sta_id = sta_id;
1449
1450 status = ADD_STA_SUCCESS;
1451 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1452 &cmd, &status);
1453
1454 switch (status) {
1455 case ADD_STA_SUCCESS:
1456 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
1457 break;
1458 default:
1459 ret = -EIO;
1460 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
1461 break;
1462 }
1463
1464 return ret;
1465 }
1466
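/*
 * Install a key. CMAC keys are programmed as the IGTK; for other ciphers,
 * pick a firmware key offset if none was pre-assigned, program the key,
 * and for WEP program it a second time for the other (mcast/ucast) context.
 */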
1467 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1468 struct ieee80211_vif *vif,
1469 struct ieee80211_sta *sta,
1470 struct ieee80211_key_conf *keyconf,
1471 u8 key_offset)
1472 {
1473 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1474 struct iwl_mvm_sta *mvm_sta;
1475 u8 sta_id;
1476 int ret;
1477 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1478
1479 lockdep_assert_held(&mvm->mutex);
1480
1481 /* Get the station id from the mvm local station table */
1482 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
1483 if (!mvm_sta) {
1484 IWL_ERR(mvm, "Failed to find station\n");
1485 return -EINVAL;
1486 }
1487 sta_id = mvm_sta->sta_id;
1488
1489 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
1490 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
1491 goto end;
1492 }
1493
1494 /*
1495 * It is possible that the 'sta' parameter is NULL, and thus
1496 * there is a need to retrieve the sta from the local station table.
1497 */
1498 if (!sta) {
1499 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1500 lockdep_is_held(&mvm->mutex));
1501 if (IS_ERR_OR_NULL(sta)) {
1502 IWL_ERR(mvm, "Invalid station id\n");
1503 return -EINVAL;
1504 }
1505 }
1506
1507 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1508 return -EINVAL;
1509
1510 /* If the key_offset is not pre-assigned, we need to find a
1511 * new offset to use. In normal cases, the offset is not
1512 * pre-assigned, but during HW_RESTART we want to reuse the
1513 * same indices, so we pass them when this function is called.
1514 *
1515 * In D3 entry, we need to hardcode the indices (because the
1516 * firmware hardcodes the PTK offset to 0). In this case, we
1517 * need to make sure we don't overwrite the hw_key_idx in the
1518 * keyconf structure, because otherwise we cannot configure
1519 * the original ones back when resuming.
1520 */
1521 if (key_offset == STA_KEY_IDX_INVALID) {
1522 key_offset = iwl_mvm_set_fw_key_idx(mvm);
1523 if (key_offset == STA_KEY_IDX_INVALID)
1524 return -ENOSPC;
1525 keyconf->hw_key_idx = key_offset;
1526 }
1527
1528 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
1529 if (ret)
1530 goto end;
1531
1532 /*
1533 * For WEP, the same key is used for multicast and unicast. Upload it
1534 * again, using the same key offset, and now pointing the other one
1535 * to the same key slot (offset).
1536 * If this fails, remove the original as well.
1537 */
1538 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1539 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
1540 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
1541 key_offset, !mcast);
1542 if (ret) {
1543 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1544 goto end;
1545 }
1546 }
1547
1548 __set_bit(key_offset, mvm->fw_key_table);
1549
1550 end:
1551 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1552 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1553 sta ? sta->addr : zero_addr, ret);
1554 return ret;
1555 }
1556
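/*
 * Remove a key from the firmware and free its offset in the firmware key
 * table, updating the per-offset deletion counters used to pick offsets.
 */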
1557 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1558 struct ieee80211_vif *vif,
1559 struct ieee80211_sta *sta,
1560 struct ieee80211_key_conf *keyconf)
1561 {
1562 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1563 struct iwl_mvm_sta *mvm_sta;
1564 u8 sta_id = IWL_MVM_STATION_COUNT;
1565 int ret, i;
1566
1567 lockdep_assert_held(&mvm->mutex);
1568
1569 /* Get the station from the mvm local station table */
1570 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
1571
1572 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
1573 keyconf->keyidx, sta_id);
1574
1575 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1576 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
1577
1578 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
1579 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
1580 keyconf->hw_key_idx);
1581 return -ENOENT;
1582 }
1583
1584 /* track which key was deleted last */
1585 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
1586 if (mvm->fw_key_deleted[i] < U8_MAX)
1587 mvm->fw_key_deleted[i]++;
1588 }
1589 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
1590
1591 if (!mvm_sta) {
1592 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
1593 return 0;
1594 }
1595
1596 sta_id = mvm_sta->sta_id;
1597
1598 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1599 if (ret)
1600 return ret;
1601
1602 /* delete WEP key twice to get rid of (now useless) offset */
1603 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1604 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
1605 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
1606
1607 return ret;
1608 }
1609
1610 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1611 struct ieee80211_vif *vif,
1612 struct ieee80211_key_conf *keyconf,
1613 struct ieee80211_sta *sta, u32 iv32,
1614 u16 *phase1key)
1615 {
1616 struct iwl_mvm_sta *mvm_sta;
1617 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1618
1619 rcu_read_lock();
1620
1621 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
1622 if (WARN_ON_ONCE(!mvm_sta))
1623 goto unlock;
1624 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1625 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
1626
1627 unlock:
1628 rcu_read_unlock();
1629 }
1630
1631 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1632 struct ieee80211_sta *sta)
1633 {
1634 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1635 struct iwl_mvm_add_sta_cmd cmd = {
1636 .add_modify = STA_MODE_MODIFY,
1637 .sta_id = mvmsta->sta_id,
1638 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1639 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1640 };
1641 int ret;
1642
1643 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1644 if (ret)
1645 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1646 }
1647
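/*
 * Tell the firmware how many frames may be released to a sleeping client
 * during a service period (U-APSD) or after a PS-Poll, adjusting the count
 * when the released frames come from aggregation queues.
 */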
1648 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1649 struct ieee80211_sta *sta,
1650 enum ieee80211_frame_release_type reason,
1651 u16 cnt, u16 tids, bool more_data,
1652 bool agg)
1653 {
1654 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1655 struct iwl_mvm_add_sta_cmd cmd = {
1656 .add_modify = STA_MODE_MODIFY,
1657 .sta_id = mvmsta->sta_id,
1658 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
1659 .sleep_tx_count = cpu_to_le16(cnt),
1660 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1661 };
1662 int tid, ret;
1663 unsigned long _tids = tids;
1664
1665 /* convert TIDs to ACs - we don't support TSPEC so that's OK
1666 * Note that this field is reserved and unused by firmware not
1667 * supporting GO uAPSD, so it's safe to always do this.
1668 */
1669 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
1670 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
1671
1672 /* If we're releasing frames from aggregation queues then check if all
1673 * the queues combined that we're releasing frames from have
1674 * - more frames than the service period, in which case more_data
1675 * needs to be set
1676 * - fewer than 'cnt' frames, in which case we need to adjust the
1677 * firmware command (but do that unconditionally)
1678 */
1679 if (agg) {
1680 int remaining = cnt;
1681 int sleep_tx_count;
1682
1683 spin_lock_bh(&mvmsta->lock);
1684 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
1685 struct iwl_mvm_tid_data *tid_data;
1686 u16 n_queued;
1687
1688 tid_data = &mvmsta->tid_data[tid];
1689 if (WARN(tid_data->state != IWL_AGG_ON &&
1690 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
1691 "TID %d state is %d\n",
1692 tid, tid_data->state)) {
1693 spin_unlock_bh(&mvmsta->lock);
1694 ieee80211_sta_eosp(sta);
1695 return;
1696 }
1697
1698 n_queued = iwl_mvm_tid_queued(tid_data);
1699 if (n_queued > remaining) {
1700 more_data = true;
1701 remaining = 0;
1702 break;
1703 }
1704 remaining -= n_queued;
1705 }
1706 sleep_tx_count = cnt - remaining;
1707 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
1708 mvmsta->sleep_tx_count = sleep_tx_count;
1709 spin_unlock_bh(&mvmsta->lock);
1710
1711 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
1712 if (WARN_ON(cnt - remaining == 0)) {
1713 ieee80211_sta_eosp(sta);
1714 return;
1715 }
1716 }
1717
1718 /* Note: this is ignored by firmware not supporting GO uAPSD */
1719 if (more_data)
1720 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
1721
1722 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
1723 mvmsta->next_status_eosp = true;
1724 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
1725 } else {
1726 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1727 }
1728
1729 /* block the Tx queues until the FW updated the sleep Tx count */
1730 iwl_trans_block_txq_ptrs(mvm->trans, true);
1731
1732 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
1733 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
1734 sizeof(cmd), &cmd);
1735 if (ret)
1736 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1737 }
1738
1739 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
1740 struct iwl_rx_cmd_buffer *rxb)
1741 {
1742 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1743 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
1744 struct ieee80211_sta *sta;
1745 u32 sta_id = le32_to_cpu(notif->sta_id);
1746
1747 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
1748 return;
1749
1750 rcu_read_lock();
1751 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1752 if (!IS_ERR_OR_NULL(sta))
1753 ieee80211_sta_eosp(sta);
1754 rcu_read_unlock();
1755 }
1756
1757 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
1758 struct iwl_mvm_sta *mvmsta, bool disable)
1759 {
1760 struct iwl_mvm_add_sta_cmd cmd = {
1761 .add_modify = STA_MODE_MODIFY,
1762 .sta_id = mvmsta->sta_id,
1763 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
1764 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
1765 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1766 };
1767 int ret;
1768
1769 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1770 if (ret)
1771 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1772 }
1773
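/*
 * Enable or disable TX to a single client station, updating both the
 * firmware (STA_FLG_DISABLE_TX) and mac80211's awake-blocking state (the
 * latter is not unblocked while frames are still pending for the station).
 */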
1774 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
1775 struct ieee80211_sta *sta,
1776 bool disable)
1777 {
1778 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1779
1780 spin_lock_bh(&mvm_sta->lock);
1781
1782 if (mvm_sta->disable_tx == disable) {
1783 spin_unlock_bh(&mvm_sta->lock);
1784 return;
1785 }
1786
1787 mvm_sta->disable_tx = disable;
1788
1789 /*
1790 * Tell mac80211 to start/stop queuing tx for this station,
1791 * but don't stop queuing if there are still pending frames
1792 * for this station.
1793 */
1794 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
1795 ieee80211_sta_block_awake(mvm->hw, sta, disable);
1796
1797 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
1798
1799 spin_unlock_bh(&mvm_sta->lock);
1800 }
1801
1802 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
1803 struct iwl_mvm_vif *mvmvif,
1804 bool disable)
1805 {
1806 struct ieee80211_sta *sta;
1807 struct iwl_mvm_sta *mvm_sta;
1808 int i;
1809
1810 lockdep_assert_held(&mvm->mutex);
1811
1812 /* Block/unblock all the stations of the given mvmvif */
1813 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
1814 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
1815 lockdep_is_held(&mvm->mutex));
1816 if (IS_ERR_OR_NULL(sta))
1817 continue;
1818
1819 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1820 if (mvm_sta->mac_id_n_color !=
1821 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
1822 continue;
1823
1824 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
1825 }
1826 }
1827
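/* Disable TX to the AP station of this interface around a channel switch */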
1828 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1829 {
1830 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1831 struct iwl_mvm_sta *mvmsta;
1832
1833 rcu_read_lock();
1834
1835 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
1836
1837 if (!WARN_ON(!mvmsta))
1838 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
1839
1840 rcu_read_unlock();
1841 }