iwlwifi: mvm: add RSS queues notification infrastructure
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"

void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	mvm->ampdu_ref++;
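	/*
	 * Editor's note: each PHY notification that precedes an A-MPDU
	 * bumps this reference; the single-queue RX path copies it into
	 * rx_status->ampdu_reference so mac80211 can group the MPDUs of
	 * one A-MPDU. Here it currently only feeds the debugfs counter
	 * below.
	 */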

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
		spin_lock(&mvm->drv_stats_lock);
		mvm->drv_rx_stats.ampdu_count++;
		spin_unlock(&mvm->drv_stats_lock);
	}
#endif
}

static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data frames only arrive on the default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we got here, the frame is certainly either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_ERR(mvm,
			"expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;
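	/*
	 * Editor's note: in the CCMP/GCMP extended IV (the 8 bytes that
	 * follow the 802.11 header) the byte order is PN0, PN1, reserved,
	 * key-ID, PN2, PN3, PN4, PN5; the key index lives in bits 6-7 of
	 * the key-ID byte, hence the ">> 6" above.
	 */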

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

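	/*
	 * pn[] was assembled most-significant byte first, so memcmp()
	 * below amounts to a numeric comparison: any PN not strictly
	 * greater than the last PN accepted on this queue/TID is a
	 * replay.
	 */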
	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
		   IEEE80211_CCMP_PN_LEN) <= 0)
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

/* iwl_mvm_create_skb - adds the rxb data to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
			       u16 len, u8 crypt_len,
			       struct iwl_rx_cmd_buffer *rxb)
{
	unsigned int hdrlen, fraglen;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len :
					      sizeof(*hdr) + crypt_len + 8;

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

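		/*
		 * rxb_steal_page() transfers ownership of the Rx buffer's
		 * page to the skb, so everything beyond hdrlen is attached
		 * zero-copy as a page fragment instead of being memcpy'd.
		 */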
		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}
}

/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct napi_struct *napi,
					    struct sk_buff *skb, int queue,
					    struct ieee80211_sta *sta)
{
	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
		kfree_skb(skb);
	else
		ieee80211_rx_napi(mvm->hw, skb, napi);
}

static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct iwl_rx_mpdu_desc *desc,
					struct ieee80211_rx_status *rx_status)
{
	int energy_a, energy_b, max_energy;

	energy_a = desc->energy_a;
	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = desc->energy_b;
	energy_b = energy_b ? -energy_b : S8_MIN;
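
	/*
	 * Editor's note (assumption): the firmware reports energy as the
	 * magnitude of a negative dBm value, so energy_a == 40 means
	 * -40 dBm; a raw 0 means "not measured" and maps to S8_MIN so it
	 * never wins the max() below.
	 */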
	max_energy = max(energy_a, energy_b);

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
			energy_a, energy_b, max_energy);

	rx_status->signal = max_energy;
	rx_status->chains = 0; /* TODO: phy info */
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
	rx_status->chain_signal[2] = S8_MIN;
}

static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats,
			     struct iwl_rx_mpdu_desc *desc, int queue,
			     u8 *crypt_len)
{
	u16 status = le16_to_cpu(desc->status);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
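		/*
		 * CCM and GCM share this path: the BUILD_BUG_ON above
		 * guarantees their PN lengths match, and both use the same
		 * 8-byte extended-IV header.
		 */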
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* fall through if TTAK OK */
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
		    IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	default:
		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
	}

	return 0;
}

static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

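	/*
	 * CHECKSUM_UNNECESSARY tells the stack to skip software
	 * verification; it is only safe when the vif opted in to
	 * NETIF_F_RXCSUM and the hardware validated both the IP header
	 * and the TCP/UDP checksum, hence the three-way test below.
	 */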
	if (mvmvif->features & NETIF_F_RXCSUM &&
	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * returns true if a packet outside a BA session is a duplicate and
 * should be dropped
 */
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
				  struct ieee80211_rx_status *rx_status,
				  struct ieee80211_hdr *hdr,
				  struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 baid, tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	baid = (le32_to_cpu(desc->reorder_data) &
		IWL_RX_MPDU_REORDER_BAID_MASK) >>
	       IWL_RX_MPDU_REORDER_BAID_SHIFT;

	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
		return false;
	}

	if (ieee80211_is_data_qos(hdr->frame_control))
		/* frame has qos control */
		tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = IWL_MAX_TID_COUNT;

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

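	/*
	 * A frame is a duplicate if it is a retry carrying the same
	 * sequence control as the last frame seen on this queue/TID with
	 * an A-MSDU sub-frame index that did not advance. Frames inside
	 * a BA session never get here (see the BAID check above); the
	 * reorder buffer takes care of those.
	 */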
	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}

int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
			    const u8 *data, u32 count)
{
	struct iwl_rxq_sync_cmd *cmd;
	u32 data_size = sizeof(*cmd) + count;
	int ret;

	/* should be DWORD aligned */
	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
		return -EINVAL;

	cmd = kzalloc(data_size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->rxq_mask = cpu_to_le32(rxq_mask);
	cmd->count = cpu_to_le32(count);
	cmd->flags = 0;
	memcpy(cmd->payload, data, count);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP,
					   TRIGGER_RX_QUEUES_NOTIF_CMD),
				   0, data_size, cmd);

	kfree(cmd);
	return ret;
}
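
/*
 * Usage sketch (editor's illustration, not part of this file): a caller
 * that wants to fan an internal event out to every RX queue wraps it in
 * an iwl_mvm_internal_rxq_notif and hands it to the helper above. The
 * queue mask below assumes the transport exposes num_rx_queues.
 *
 *	struct iwl_mvm_internal_rxq_notif notif = {
 *		.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
 *	};
 *
 *	ret = iwl_mvm_notify_rx_queue(mvm,
 *				      BIT(mvm->trans->num_rx_queues) - 1,
 *				      (u8 *)&notif, sizeof(notif));
 */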

void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			    int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;
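
	/*
	 * The firmware echoes the payload of TRIGGER_RX_QUEUES_NOTIF_CMD
	 * back to the queues selected in rxq_mask, so this is the same
	 * iwl_mvm_internal_rxq_notif that the driver sent through
	 * iwl_mvm_notify_rx_queue().
	 */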
	switch (internal_notif->type) {
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		/* TODO */
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}
}

void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
	u32 len = le16_to_cpu(desc->mpdu_len);
	u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	u8 crypt_len = 0;

	/* Don't use dev_alloc_skb(); we'll have enough headroom once the
	 * ieee80211_hdr has been pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
		kfree_skb(skb);
		return;
	}

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
	    !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
			     le16_to_cpu(desc->status));
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
	rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
					       IEEE80211_BAND_2GHZ;
	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
							 rx_status->band);
	iwl_mvm_get_signal_strength(mvm, desc, rx_status);
	/* TSF as indicated by the firmware is at INA time */
	rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

	rcu_read_lock();

	if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
		u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

		if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
			if (IS_ERR(sta))
				sta = NULL;
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/*
		 * This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/*
		 * We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(mvm->csa_tx_block_bcn_timeout))
			iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);

		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);

		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
		    ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_tlv *trig;
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			bool trig_check;
			s32 rssi;

			trig = iwl_fw_dbg_get_trigger(mvm->fw,
						      FW_DBG_TRIGGER_RSSI);
			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			trig_check =
				iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
							      trig);
			if (trig_check && rx_status->signal < rssi)
				iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
		}

		/* TODO: multi queue TCM */

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(sta, skb, desc);

		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
			kfree_skb(skb);
			rcu_read_unlock();
			return;
		}
	}

	/*
	 * TODO: PHY info.
	 * Verify we don't have the information in the MPDU descriptor and
	 * that it is not needed.
	 * Make sure for monitor mode that we are on default queue, update
	 * ampdu_ref and the rest of phy info then
	 */

	/* Set up the HT phy flags */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->flag |= RX_FLAG_40MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status->flag |= RX_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		rx_status->flag |= RX_FLAG_HT_GF;
	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->flag |= RX_FLAG_LDPC;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
			  RATE_MCS_STBC_POS;
		rx_status->flag |= RX_FLAG_HT;
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
			  RATE_MCS_STBC_POS;
		rx_status->vht_nss =
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
			 RATE_VHT_MCS_NSS_POS) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
		rx_status->flag |= RX_FLAG_VHT;
		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
		if (rate_n_flags & RATE_MCS_BF_MSK)
			rx_status->vht_flag |= RX_VHT_FLAG_BF;
	} else {
		rx_status->rate_idx =
			iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							    rx_status->band);
	}

	/* TODO: PHY info - update ampdu queue statistics (for debugfs) */
	/* TODO: PHY info - gscan */

	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
	iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
	rcu_read_unlock();
}

void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb, int queue)
{
	/* TODO */
}