iwlwifi: mvm: loosen nssn comparison to reorder buffer head
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"

void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
        mvm->ampdu_ref++;

#ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
                spin_lock(&mvm->drv_stats_lock);
                mvm->drv_rx_stats.ampdu_count++;
                spin_unlock(&mvm->drv_stats_lock);
        }
#endif
}

static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
        struct iwl_mvm_key_pn *ptk_pn;
        u8 tid, keyidx;
        u8 pn[IEEE80211_CCMP_PN_LEN];
        u8 *extiv;

        /* do PN checking */

        /* multicast and non-data frames arrive only on the default queue */
        if (!ieee80211_is_data(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return 0;

        /* do not check PN for open AP */
        if (!(stats->flag & RX_FLAG_DECRYPTED))
                return 0;

        /*
         * avoid checking for default queue - we don't want to replicate
         * all the logic that's necessary for checking the PN on fragmented
         * frames, leave that to mac80211
         */
        if (queue == 0)
                return 0;

        /* if we got here, the frame is for sure either CCMP or GCMP */
        if (IS_ERR_OR_NULL(sta)) {
                IWL_ERR(mvm,
                        "expected hw-decrypted unicast frame for station\n");
                return -1;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
        keyidx = extiv[3] >> 6;

        ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
        if (!ptk_pn)
                return -1;

        if (ieee80211_is_data_qos(hdr->frame_control))
                tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = 0;

        /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
        if (tid >= IWL_MAX_TID_COUNT)
                return -1;

        /* load pn */
        pn[0] = extiv[7];
        pn[1] = extiv[6];
        pn[2] = extiv[5];
        pn[3] = extiv[4];
        pn[4] = extiv[1];
        pn[5] = extiv[0];

        if (memcmp(pn, ptk_pn->q[queue].pn[tid],
                   IEEE80211_CCMP_PN_LEN) <= 0)
                return -1;

        memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
        stats->flag |= RX_FLAG_PN_VALIDATED;

        return 0;
}
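
/*
 * Illustrative note on the PN reassembly above (byte layout per the
 * CCMP/GCMP extended-IV format in IEEE 802.11, not stated in this file):
 * the 8 bytes following the MAC header are
 *
 *      extiv[0] = PN0, extiv[1] = PN1, extiv[2] = reserved,
 *      extiv[3] = ExtIV/key ID, extiv[4] = PN2 ... extiv[7] = PN5
 *
 * with PN5 the most significant byte. Reordering into pn[] MSB-first
 * lets a plain memcmp() act as a numeric compare; e.g. a received PN of
 * 0x000000000102 against a stored 0x000000000101 compares > 0 and is
 * accepted as a replay-safe successor.
 */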

/* iwl_mvm_create_skb - adds the rxb data to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
                               u16 len, u8 crypt_len,
                               struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        unsigned int headlen, fraglen, pad_len = 0;
        unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
                pad_len = 2;
        len -= pad_len;

        /* If frame is small enough to fit in skb->head, pull it completely.
         * If not, only pull ieee80211_hdr (including crypto if present, and
         * an additional 8 bytes for SNAP/ethertype, see below) so that
         * splice() or TCP coalesce are more efficient.
         *
         * Since, in addition, ieee80211_data_to_8023() always pulls in at
         * least 8 bytes (possibly more for mesh) we can do the same here
         * to save the cost of doing it later. That still doesn't pull in
         * the actual IP header since the typical case has a SNAP header.
         * If the latter changes (there are efforts in the standards group
         * to do so) we should revisit this and ieee80211_data_to_8023().
         */
        headlen = (len <= skb_tailroom(skb)) ? len :
                                               hdrlen + crypt_len + 8;

        /* The firmware may align the packet to DWORD.
         * The padding is inserted after the IV.
         * After copying the header + IV skip the padding if
         * present before copying packet data.
         */
        hdrlen += crypt_len;
        memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
        memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
               headlen - hdrlen);

        fraglen = len - headlen;

        if (fraglen) {
                int offset = (void *)hdr + headlen + pad_len -
                             rxb_addr(rxb) + rxb_offset(rxb);

                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
        }
}
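
/*
 * Worked example for iwl_mvm_create_skb() (illustrative, values assumed):
 * a 1200-byte CCMP-protected QoS data frame (hdrlen = 26, crypt_len = 8,
 * no DWORD pad) does not fit in the 128-byte skb->head allocated by the
 * caller, so headlen = 26 + 8 + 8 = 42. The header + IV (34 bytes) plus
 * 8 payload bytes are copied into skb->head, and the remaining
 * fraglen = 1200 - 42 = 1158 bytes stay in the rx page, attached via
 * skb_add_rx_frag().
 */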

/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                            struct napi_struct *napi,
                                            struct sk_buff *skb, int queue,
                                            struct ieee80211_sta *sta)
{
        if (iwl_mvm_check_pn(mvm, skb, queue, sta))
                kfree_skb(skb);
        else
                ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}

static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct iwl_rx_mpdu_desc *desc,
                                        struct ieee80211_rx_status *rx_status)
{
        int energy_a, energy_b, max_energy;

        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = desc->energy_b;
        energy_b = energy_b ? -energy_b : S8_MIN;
        max_energy = max(energy_a, energy_b);

        IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
                        energy_a, energy_b, max_energy);

        rx_status->signal = max_energy;
        rx_status->chains = 0; /* TODO: phy info */
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
        rx_status->chain_signal[2] = S8_MIN;
}
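
/*
 * Example (illustrative reading of the code above): the per-chain energy
 * appears to be reported as a positive value below 0 dBm, with 0 meaning
 * the chain was unused. So desc->energy_a = 40 and desc->energy_b = 0
 * become chain signals of -40 dBm and S8_MIN respectively, and
 * rx_status->signal = max(-40, S8_MIN) = -40.
 */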

static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats,
                             struct iwl_rx_mpdu_desc *desc, int queue,
                             u8 *crypt_len)
{
        u16 status = le16_to_cpu(desc->status);

        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
                return 0;

        /* TODO: handle packets encrypted with unknown alg */

        switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
        case IWL_RX_MPDU_STATUS_SEC_CCM:
        case IWL_RX_MPDU_STATUS_SEC_GCM:
                BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
                /* alg is CCM: check MIC only */
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                *crypt_len = IEEE80211_CCMP_HDR_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_TKIP:
                /* Don't drop the frame and decrypt it in SW */
                if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
                        return 0;

                *crypt_len = IEEE80211_TKIP_IV_LEN;
                /* fall through if TTAK OK */
        case IWL_RX_MPDU_STATUS_SEC_WEP:
                if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
                    IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        default:
                IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
        }

        return 0;
}
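
/*
 * Example (illustrative): for a CCMP frame the firmware sets
 * IWL_RX_MPDU_STATUS_SEC_CCM in desc->status. With MIC_OK also set, the
 * function above flags RX_FLAG_DECRYPTED and reports an 8-byte
 * (IEEE80211_CCMP_HDR_LEN) crypto header so iwl_mvm_create_skb() copies
 * past the IV; without MIC_OK it returns -1 and the caller drops the
 * frame.
 */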

static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
        u16 flags = le16_to_cpu(desc->l3l4_flags);
        u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
                          IWL_RX_L3_PROTO_POS);

        if (mvmvif->features & NETIF_F_RXCSUM &&
            flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
            (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
             l3_prot == IWL_RX_L3_TYPE_IPV6 ||
             l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}
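
/*
 * Example (illustrative): an IPv4 TCP frame is marked
 * CHECKSUM_UNNECESSARY only if both IWL_RX_L3L4_IP_HDR_CSUM_OK and
 * IWL_RX_L3L4_TCP_UDP_CSUM_OK are set by the firmware; for IPv6, which
 * carries no header checksum, the TCP/UDP bit alone suffices.
 */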

/*
 * returns true if a packet outside BA session is a duplicate and
 * should be dropped
 */
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
                                  struct ieee80211_rx_status *rx_status,
                                  struct ieee80211_hdr *hdr,
                                  struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_rxq_dup_data *dup_data;
        u8 baid, tid, sub_frame_idx;

        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;

        baid = (le32_to_cpu(desc->reorder_data) &
                IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;

        if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
                return false;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        dup_data = &mvm_sta->dup_data[queue];

        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
         */
        if (ieee80211_is_ctl(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1)) {
                rx_status->flag |= RX_FLAG_DUP_VALIDATED;
                return false;
        }

        if (ieee80211_is_data_qos(hdr->frame_control))
                /* frame has qos control */
                tid = *ieee80211_get_qos_ctl(hdr) &
                        IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = IWL_MAX_TID_COUNT;

        /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
        sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

        if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
                     dup_data->last_seq[tid] == hdr->seq_ctrl &&
                     dup_data->last_sub_frame[tid] >= sub_frame_idx))
                return true;

        dup_data->last_seq[tid] = hdr->seq_ctrl;
        dup_data->last_sub_frame[tid] = sub_frame_idx;

        rx_status->flag |= RX_FLAG_DUP_VALIDATED;

        return false;
}
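
/*
 * Worked example (illustrative): a station retransmits a QoS data frame
 * (retry bit set) with the same seq_ctrl as the last frame seen on this
 * TID and a sub-frame index that has not advanced - the function returns
 * true and the caller drops it. A further A-MSDU sub-frame of the same
 * SN (index incremented) is not a duplicate.
 */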

int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
                            const u8 *data, u32 count)
{
        struct iwl_rxq_sync_cmd *cmd;
        u32 data_size = sizeof(*cmd) + count;
        int ret;

        /* should be DWORD aligned */
        if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
                return -EINVAL;

        cmd = kzalloc(data_size, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->rxq_mask = cpu_to_le32(rxq_mask);
        cmd->count = cpu_to_le32(count);
        cmd->flags = 0;
        memcpy(cmd->payload, data, count);

        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(DATA_PATH_GROUP,
                                           TRIGGER_RX_QUEUES_NOTIF_CMD),
                                   0, data_size, cmd);

        kfree(cmd);
        return ret;
}
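
/*
 * Note (illustrative): count must be a DWORD multiple - e.g. a 6-byte
 * payload trips the WARN_ON above (6 & 3 == 2) and the call returns
 * -EINVAL, so callers must pad such a payload to 8 bytes first.
 */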

/*
 * Returns true if sn2 - buffer_size < sn1 < sn2.
 * To be used only in order to compare reorder buffer head with NSSN.
 * We fully trust NSSN unless it is behind us due to reorder timeout.
 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
 */
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
        return ieee80211_sn_less(sn1, sn2) &&
               !ieee80211_sn_less(sn1, sn2 - buffer_size);
}
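
/*
 * Worked example (illustrative), with buffer_size = 64 in the
 * modulo-4096 SN space:
 *
 *      iwl_mvm_is_sn_less(90, 100, 64) -> true
 *              90 is before 100 and within the 64-frame window behind
 *              it (100 - 64 = 36), i.e. a plausible reorder-timeout
 *              artifact.
 *
 *      iwl_mvm_is_sn_less(20, 100, 64) -> false
 *              20 is before 100 but more than buffer_size behind it,
 *              so it is interpreted as far ahead (wrap-around) rather
 *              than behind.
 */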

#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)

static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta,
                                   struct napi_struct *napi,
                                   struct iwl_mvm_reorder_buffer *reorder_buf,
                                   u16 nssn)
{
        u16 ssn = reorder_buf->head_sn;

        lockdep_assert_held(&reorder_buf->lock);

        /* ignore nssn smaller than head sn - this can happen due to timeout */
        if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
                return;

        while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
                int index = ssn % reorder_buf->buf_size;
                struct sk_buff_head *skb_list = &reorder_buf->entries[index];
                struct sk_buff *skb;

                ssn = ieee80211_sn_inc(ssn);

                /* holes are valid since nssn indicates frames were received. */
                if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
                        continue;
                /* Empty the list. It may hold more than one frame for an A-MSDU */
                while ((skb = __skb_dequeue(skb_list))) {
                        iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
                                                        reorder_buf->queue,
                                                        sta);
                        reorder_buf->num_stored--;
                }
        }
        reorder_buf->head_sn = nssn;

        if (reorder_buf->num_stored && !reorder_buf->removed) {
                u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

                while (!skb_peek_tail(&reorder_buf->entries[index]))
                        index = (index + 1) % reorder_buf->buf_size;
                /* modify timer to match next frame's expiration time */
                mod_timer(&reorder_buf->reorder_timer,
                          reorder_buf->reorder_time[index] + 1 +
                          RX_REORDER_BUF_TIMEOUT_MQ);
        } else {
                del_timer(&reorder_buf->reorder_timer);
        }
}
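
/*
 * Worked example for iwl_mvm_release_frames() (illustrative): with
 * head_sn = 100, buf_size = 64, frames 101 and 103 stored and
 * nssn = 104, the loop walks SN 100..103: 100 and 102 are holes the
 * nssn vouches for, 101 and 103 are dequeued (possibly several skbs
 * each for an A-MSDU) and passed up, and head_sn becomes 104.
 */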

void iwl_mvm_reorder_timer_expired(unsigned long data)
{
        struct iwl_mvm_reorder_buffer *buf = (void *)data;
        int i;
        u16 sn = 0, index = 0;
        bool expired = false;

        spin_lock_bh(&buf->lock);

        if (!buf->num_stored || buf->removed) {
                spin_unlock_bh(&buf->lock);
                return;
        }

        for (i = 0; i < buf->buf_size; i++) {
                index = (buf->head_sn + i) % buf->buf_size;

                if (!skb_peek_tail(&buf->entries[index]))
                        continue;
                if (!time_after(jiffies, buf->reorder_time[index] +
                                RX_REORDER_BUF_TIMEOUT_MQ))
                        break;
                expired = true;
                sn = ieee80211_sn_add(buf->head_sn, i + 1);
        }

        if (expired) {
                struct ieee80211_sta *sta;

                rcu_read_lock();
                sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
                /* SN is set to the last expired frame + 1 */
                iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
                rcu_read_unlock();
        } else if (buf->num_stored) {
                /*
                 * If no frame expired and there are stored frames, index now
                 * points to the first unexpired frame - modify the timer to
                 * match that frame's expiration time.
                 */
                mod_timer(&buf->reorder_timer,
                          buf->reorder_time[index] +
                          1 + RX_REORDER_BUF_TIMEOUT_MQ);
        }
        spin_unlock_bh(&buf->lock);
}
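
/*
 * Worked example for the timer path (illustrative): head_sn = 200 and
 * only SN 201 is stored, queued longer ago than
 * RX_REORDER_BUF_TIMEOUT_MQ (~100 ms). The scan marks it expired at
 * offset i = 1, so sn = 200 + 1 + 1 = 202 and iwl_mvm_release_frames()
 * flushes SN 201, moving head_sn to 202. A still-fresh frame would
 * instead just re-arm the timer for its remaining lifetime.
 */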

static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
                           struct iwl_mvm_delba_data *data)
{
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_reorder_buffer *reorder_buf;
        u8 baid = data->baid;

        if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
                return;

        rcu_read_lock();

        ba_data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN_ON_ONCE(!ba_data))
                goto out;

        sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                goto out;

        reorder_buf = &ba_data->reorder_buf[queue];

        /* release all frames that are in the reorder buffer to the stack */
        spin_lock_bh(&reorder_buf->lock);
        iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
                               ieee80211_sn_add(reorder_buf->head_sn,
                                                reorder_buf->buf_size));
        spin_unlock_bh(&reorder_buf->lock);
        del_timer_sync(&reorder_buf->reorder_timer);

out:
        rcu_read_unlock();
}

void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            int queue)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rxq_sync_notification *notif;
        struct iwl_mvm_internal_rxq_notif *internal_notif;

        notif = (void *)pkt->data;
        internal_notif = (void *)notif->payload;

        if (internal_notif->sync) {
                if (mvm->queue_sync_cookie != internal_notif->cookie) {
                        WARN_ONCE(1,
                                  "Received expired RX queue sync message\n");
                        return;
                }
                atomic_dec(&mvm->queue_sync_counter);
        }

        switch (internal_notif->type) {
        case IWL_MVM_RXQ_EMPTY:
                break;
        case IWL_MVM_RXQ_NOTIF_DEL_BA:
                iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
                break;
        default:
                WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
        }
}

/*
 * Returns true if the MPDU was buffered/dropped, false if it should be
 * passed to the upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
                            struct napi_struct *napi,
                            int queue,
                            struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            struct iwl_rx_mpdu_desc *desc)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_baid_data *baid_data;
        struct iwl_mvm_reorder_buffer *buffer;
        struct sk_buff *tail;
        u32 reorder = le32_to_cpu(desc->reorder_data);
        bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
        u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        u8 sub_frame_idx = desc->amsdu_info &
                           IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
        int index;
        u16 nssn, sn;
        u8 baid;

        baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;

        if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
                return false;

        /* no sta yet */
        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;

        /* not a data packet */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return false;

        if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
                return false;

        baid_data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN(!baid_data,
                 "Received baid %d, but no data exists for this BAID\n", baid))
                return false;
        if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
                 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
                 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
                 tid))
                return false;

        nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
        sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
                IWL_RX_MPDU_REORDER_SN_SHIFT;

        buffer = &baid_data->reorder_buf[queue];

        spin_lock_bh(&buffer->lock);

        /*
         * If there was a significant jump in the nssn - adjust.
         * If the SN is smaller than the NSSN it might need to first go into
         * the reorder buffer, in which case we just release up to it and the
         * rest of the function will take care of storing it and releasing up
         * to the nssn.
         */
        if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
                                buffer->buf_size)) {
                u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

                iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
        }

        /* drop any outdated packets */
        if (ieee80211_sn_less(sn, buffer->head_sn))
                goto drop;

        /* release immediately if allowed by nssn and no stored frames */
        if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
                if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
                                       buffer->buf_size))
                        buffer->head_sn = nssn;
                /* No need to update AMSDU last SN - we are moving the head */
                spin_unlock_bh(&buffer->lock);
                return false;
        }

        index = sn % buffer->buf_size;

        /*
         * Check if we already stored this frame.
         * As an A-MSDU is either received in full or not at all, the logic
         * is simple: if we have frames in that position in the buffer and
         * the last frame that originated from an A-MSDU had a different SN,
         * this is a retransmission. If the SN is the same, it is the same
         * A-MSDU only if the sub-frame index is incrementing - otherwise it
         * is a retransmission.
         */
        tail = skb_peek_tail(&buffer->entries[index]);
        if (tail && !amsdu)
                goto drop;
        else if (tail && (sn != buffer->last_amsdu ||
                          buffer->last_sub_index >= sub_frame_idx))
                goto drop;

        /* put in reorder buffer */
        __skb_queue_tail(&buffer->entries[index], skb);
        buffer->num_stored++;
        buffer->reorder_time[index] = jiffies;

        if (amsdu) {
                buffer->last_amsdu = sn;
                buffer->last_sub_index = sub_frame_idx;
        }

        iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
        spin_unlock_bh(&buffer->lock);
        return true;

drop:
        kfree_skb(skb);
        spin_unlock_bh(&buffer->lock);
        return true;
}
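
/*
 * Worked example for iwl_mvm_reorder() (illustrative): head_sn = 100,
 * buf_size = 64. A QoS data frame with SN 102 arrives while 100 and 101
 * are still missing and the descriptor carries nssn = 100: 102 is
 * neither outdated nor immediately releasable, so it is queued at index
 * 102 % 64 = 38 and the function returns true (buffered). Once 100 and
 * 101 arrive and the nssn advances to 103, the release path flushes all
 * three in order.
 */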

static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
{
        unsigned long now = jiffies;
        unsigned long timeout;
        struct iwl_mvm_baid_data *data;

        rcu_read_lock();

        data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN_ON(!data))
                goto out;

        if (!data->timeout)
                goto out;

        timeout = data->timeout;
        /*
         * Do not update last rx all the time to avoid cache bouncing
         * between the rx queues.
         * Update it every timeout. Worst case is the session will
         * expire after ~ 2 * timeout, which doesn't matter that much.
         */
        if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
                /* Update is atomic */
                data->last_rx = now;

out:
        rcu_read_unlock();
}

void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
{
        struct ieee80211_rx_status *rx_status;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0;

        /* Don't use dev_alloc_skb(), we'll have enough headroom once
         * the ieee80211_hdr is pulled.
         */
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
                return;
        }

        rx_status = IEEE80211_SKB_RXCB(skb);

        if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
                kfree_skb(skb);
                return;
        }

        /*
         * Keep packets with CRC errors (and with overrun) for monitor mode
         * (otherwise the firmware discards them) but mark them as bad.
         */
        if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
            !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
                IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
                             le16_to_cpu(desc->status));
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
        }

        rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
        rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
                                               NL80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);
        /* TSF as indicated by the firmware is at INA time */
        rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

        rcu_read_lock();

        if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

                if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
                        sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
                        if (IS_ERR(sta))
                                sta = NULL;
                }
        } else if (!is_multicast_ether_addr(hdr->addr2)) {
                /*
                 * This is fine since we prevent two stations with the same
                 * address from being added.
                 */
                sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
        }

        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
                               IWL_RX_MPDU_REORDER_BAID_MASK) >>
                               IWL_RX_MPDU_REORDER_BAID_SHIFT);

                /*
                 * We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
                if (unlikely(mvm->csa_tx_block_bcn_timeout))
                        iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);

                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);

                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
                        struct iwl_fw_dbg_trigger_tlv *trig;
                        struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
                        bool trig_check;
                        s32 rssi;

                        trig = iwl_fw_dbg_get_trigger(mvm->fw,
                                                      FW_DBG_TRIGGER_RSSI);
                        rssi_trig = (void *)trig->data;
                        rssi = le32_to_cpu(rssi_trig->rssi);

                        trig_check =
                                iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
                                iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
                }

                /* TODO: multi queue TCM */

                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, desc);

                if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
                        kfree_skb(skb);
                        rcu_read_unlock();
                        return;
                }

                /*
                 * Our hardware de-aggregates A-MSDUs but copies the mac header
                 * as is to the de-aggregated MPDUs. We need to turn off the
                 * A-MSDU bit in the QoS control ourselves.
                 */
                if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
                    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
                        u8 *qc = ieee80211_get_qos_ctl(hdr);

                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
                }
                if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
                        iwl_mvm_agg_rx_received(mvm, baid);
        }

        /*
         * TODO: PHY info.
         * Verify we don't have the information in the MPDU descriptor and
         * that it is not needed.
         * Make sure for monitor mode that we are on the default queue, then
         * update ampdu_ref and the rest of the phy info.
         */

        /* Set up the HT phy flags */
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
        case RATE_MCS_CHAN_WIDTH_20:
                break;
        case RATE_MCS_CHAN_WIDTH_40:
                rx_status->flag |= RX_FLAG_40MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_80:
                rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_160:
                rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
                break;
        }
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->flag |= RX_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status->flag |= RX_FLAG_HT_GF;
        if (rate_n_flags & RATE_MCS_LDPC_MSK)
                rx_status->flag |= RX_FLAG_LDPC;
        if (rate_n_flags & RATE_MCS_HT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
                          RATE_MCS_STBC_POS;
                rx_status->flag |= RX_FLAG_HT;
                rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
                rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
        } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
                          RATE_MCS_STBC_POS;
                rx_status->vht_nss =
                        ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
                         RATE_VHT_MCS_NSS_POS) + 1;
                rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status->flag |= RX_FLAG_VHT;
                rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->vht_flag |= RX_VHT_FLAG_BF;
        } else {
                rx_status->rate_idx =
                        iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
                                                            rx_status->band);
        }

        /* TODO: PHY info - update ampdu queue statistics (for debugfs) */
        /* TODO: PHY info - gscan */

        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
        if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
                iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
        rcu_read_unlock();
}

void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
                              struct iwl_rx_cmd_buffer *rxb, int queue)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_frame_release *release = (void *)pkt->data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_reorder_buffer *reorder_buf;
        struct iwl_mvm_baid_data *ba_data;

        int baid = release->baid;

        if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
                return;

        rcu_read_lock();

        ba_data = rcu_dereference(mvm->baid_map[baid]);
        if (WARN_ON_ONCE(!ba_data))
                goto out;

        sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                goto out;

        reorder_buf = &ba_data->reorder_buf[queue];

        spin_lock_bh(&reorder_buf->lock);
        iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
                               le16_to_cpu(release->nssn));
        spin_unlock_bh(&reorder_buf->lock);

out:
        rcu_read_unlock();
}