net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22 #include <asm/unaligned.h>
23
24 #include "ieee80211_i.h"
25 #include "driver-ops.h"
26 #include "led.h"
27 #include "mesh.h"
28 #include "wep.h"
29 #include "wpa.h"
30 #include "tkip.h"
31 #include "wme.h"
32 #include "rate.h"
33
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb)
42 {
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN))
45 __pskb_trim(skb, skb->len - FCS_LEN);
46 else {
47 /* driver bug */
48 WARN_ON(1);
49 dev_kfree_skb(skb);
50 skb = NULL;
51 }
52 }
53
54 return skb;
55 }
56
57 static inline int should_drop_frame(struct sk_buff *skb,
58 int present_fcs_len)
59 {
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
64 RX_FLAG_FAILED_PLCP_CRC |
65 RX_FLAG_AMPDU_IS_ZEROLEN))
66 return 1;
67 if (unlikely(skb->len < 16 + present_fcs_len))
68 return 1;
69 if (ieee80211_is_ctl(hdr->frame_control) &&
70 !ieee80211_is_pspoll(hdr->frame_control) &&
71 !ieee80211_is_back_req(hdr->frame_control))
72 return 1;
73 return 0;
74 }
75
76 static int
77 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
78 struct ieee80211_rx_status *status)
79 {
80 int len;
81
82 /* always present fields */
83 len = sizeof(struct ieee80211_radiotap_header) + 9;
84
85 if (status->flag & RX_FLAG_MACTIME_MPDU)
86 len += 8;
87 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
88 len += 1;
89
90 if (len & 1) /* padding for RX_FLAGS if necessary */
91 len++;
92
93 if (status->flag & RX_FLAG_HT) /* HT info */
94 len += 3;
95
96 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
97 /* padding */
98 while (len & 3)
99 len++;
100 len += 8;
101 }
102
103 return len;
104 }
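/*
 * A note on the fixed 9 bytes above: they cover, in the order written by
 * ieee80211_add_rx_radiotap_header() below, the FLAGS byte, the RATE byte
 * (kept as channel padding for HT frames), 4 bytes of CHANNEL, the ANTENNA
 * byte and the 2-byte RX_FLAGS field; the radiotap header itself is 8 bytes.
 */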
105
106 /*
107 * ieee80211_add_rx_radiotap_header - add radiotap header
108 *
109 * add a radiotap header containing all the fields which the hardware provided.
110 */
111 static void
112 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
113 struct sk_buff *skb,
114 struct ieee80211_rate *rate,
115 int rtap_len, bool has_fcs)
116 {
117 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
118 struct ieee80211_radiotap_header *rthdr;
119 unsigned char *pos;
120 u16 rx_flags = 0;
121
122 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
123 memset(rthdr, 0, rtap_len);
124
125 /* radiotap header, set always present flags */
126 rthdr->it_present =
127 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
128 (1 << IEEE80211_RADIOTAP_CHANNEL) |
129 (1 << IEEE80211_RADIOTAP_ANTENNA) |
130 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
131 rthdr->it_len = cpu_to_le16(rtap_len);
132
133 pos = (unsigned char *)(rthdr+1);
134
135 /* the order of the following fields is important */
136
137 /* IEEE80211_RADIOTAP_TSFT */
138 if (status->flag & RX_FLAG_MACTIME_MPDU) {
139 put_unaligned_le64(status->mactime, pos);
140 rthdr->it_present |=
141 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
142 pos += 8;
143 }
144
145 /* IEEE80211_RADIOTAP_FLAGS */
146 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
147 *pos |= IEEE80211_RADIOTAP_F_FCS;
148 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
149 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
150 if (status->flag & RX_FLAG_SHORTPRE)
151 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
152 pos++;
153
154 /* IEEE80211_RADIOTAP_RATE */
155 if (!rate || status->flag & RX_FLAG_HT) {
156 /*
157 * Without rate information we don't add the field; if we have
158 * HT, the MCS information is a separate field in radiotap,
159 * added below. The byte here is still needed as padding
160 * for the channel though, so initialise it to 0.
161 */
162 *pos = 0;
163 } else {
164 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
165 *pos = rate->bitrate / 5;
166 }
167 pos++;
168
169 /* IEEE80211_RADIOTAP_CHANNEL */
170 put_unaligned_le16(status->freq, pos);
171 pos += 2;
172 if (status->band == IEEE80211_BAND_5GHZ)
173 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
174 pos);
175 else if (status->flag & RX_FLAG_HT)
176 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
177 pos);
178 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
179 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
180 pos);
181 else if (rate)
182 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
183 pos);
184 else
185 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
186 pos += 2;
187
188 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
189 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
190 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
191 *pos = status->signal;
192 rthdr->it_present |=
193 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
194 pos++;
195 }
196
197 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
198
199 /* IEEE80211_RADIOTAP_ANTENNA */
200 *pos = status->antenna;
201 pos++;
202
203 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
204
205 /* IEEE80211_RADIOTAP_RX_FLAGS */
206 /* ensure 2 byte alignment for the 2 byte field as required */
207 if ((pos - (u8 *)rthdr) & 1)
208 pos++;
209 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
210 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
211 put_unaligned_le16(rx_flags, pos);
212 pos += 2;
213
214 if (status->flag & RX_FLAG_HT) {
215 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
216 *pos++ = local->hw.radiotap_mcs_details;
217 *pos = 0;
218 if (status->flag & RX_FLAG_SHORT_GI)
219 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
220 if (status->flag & RX_FLAG_40MHZ)
221 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
222 if (status->flag & RX_FLAG_HT_GF)
223 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
224 pos++;
225 *pos++ = status->rate_idx;
226 }
227
228 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
229 u16 flags = 0;
230
231 /* ensure 4 byte alignment */
232 while ((pos - (u8 *)rthdr) & 3)
233 pos++;
234 rthdr->it_present |=
235 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
236 put_unaligned_le32(status->ampdu_reference, pos);
237 pos += 4;
238 if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
239 flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
240 if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
241 flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
242 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
243 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
244 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
245 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
246 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
247 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
248 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
249 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
250 put_unaligned_le16(flags, pos);
251 pos += 2;
252 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
253 *pos++ = status->ampdu_delimiter_crc;
254 else
255 *pos++ = 0;
256 *pos++ = 0;
257 }
258 }
259
260 /*
261 * This function copies a received frame to all monitor interfaces and
262 * returns a cleaned-up SKB that no longer includes the FCS nor the
263 * radiotap header the driver might have added.
264 */
265 static struct sk_buff *
266 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
267 struct ieee80211_rate *rate)
268 {
269 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
270 struct ieee80211_sub_if_data *sdata;
271 int needed_headroom;
272 struct sk_buff *skb, *skb2;
273 struct net_device *prev_dev = NULL;
274 int present_fcs_len = 0;
275
276 /*
277 * First, we may need to make a copy of the skb because
278 * (1) we need to modify it for radiotap (if not present), and
279 * (2) the other RX handlers will modify the skb we got.
280 *
281 * We don't need to, of course, if we aren't going to return
282 * the SKB because it has a bad FCS/PLCP checksum.
283 */
284
285 /* room for the radiotap header based on driver features */
286 needed_headroom = ieee80211_rx_radiotap_len(local, status);
287
288 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
289 present_fcs_len = FCS_LEN;
290
291 /* make sure hdr->frame_control is on the linear part */
292 if (!pskb_may_pull(origskb, 2)) {
293 dev_kfree_skb(origskb);
294 return NULL;
295 }
296
297 if (!local->monitors) {
298 if (should_drop_frame(origskb, present_fcs_len)) {
299 dev_kfree_skb(origskb);
300 return NULL;
301 }
302
303 return remove_monitor_info(local, origskb);
304 }
305
306 if (should_drop_frame(origskb, present_fcs_len)) {
307 /* only need to expand headroom if necessary */
308 skb = origskb;
309 origskb = NULL;
310
311 /*
312 * This shouldn't trigger often because most devices have an
313 * RX header they pull before we get here, and that should
314 * be big enough for our radiotap information. We should
315 * probably export the length to drivers so that we can have
316 * them allocate enough headroom to start with.
317 */
318 if (skb_headroom(skb) < needed_headroom &&
319 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
320 dev_kfree_skb(skb);
321 return NULL;
322 }
323 } else {
324 /*
325 * Need to make a copy and possibly remove radiotap header
326 * and FCS from the original.
327 */
328 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
329
330 origskb = remove_monitor_info(local, origskb);
331
332 if (!skb)
333 return origskb;
334 }
335
336 /* prepend radiotap information */
337 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
338 true);
339
340 skb_reset_mac_header(skb);
341 skb->ip_summed = CHECKSUM_UNNECESSARY;
342 skb->pkt_type = PACKET_OTHERHOST;
343 skb->protocol = htons(ETH_P_802_2);
344
345 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
346 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
347 continue;
348
349 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
350 continue;
351
352 if (!ieee80211_sdata_running(sdata))
353 continue;
354
355 if (prev_dev) {
356 skb2 = skb_clone(skb, GFP_ATOMIC);
357 if (skb2) {
358 skb2->dev = prev_dev;
359 netif_receive_skb(skb2);
360 }
361 }
362
363 prev_dev = sdata->dev;
364 sdata->dev->stats.rx_packets++;
365 sdata->dev->stats.rx_bytes += skb->len;
366 }
367
368 if (prev_dev) {
369 skb->dev = prev_dev;
370 netif_receive_skb(skb);
371 } else
372 dev_kfree_skb(skb);
373
374 return origskb;
375 }
376
377
378 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
379 {
380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
381 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
382 int tid, seqno_idx, security_idx;
383
384 /* does the frame have a qos control field? */
385 if (ieee80211_is_data_qos(hdr->frame_control)) {
386 u8 *qc = ieee80211_get_qos_ctl(hdr);
387 /* frame has qos control */
388 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
389 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
390 status->rx_flags |= IEEE80211_RX_AMSDU;
391
392 seqno_idx = tid;
393 security_idx = tid;
394 } else {
395 /*
396 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
397 *
398 * Sequence numbers for management frames, QoS data
399 * frames with a broadcast/multicast address in the
400 * Address 1 field, and all non-QoS data frames sent
401 * by QoS STAs are assigned using an additional single
402 * modulo-4096 counter, [...]
403 *
404 * We also use that counter for non-QoS STAs.
405 */
406 seqno_idx = NUM_RX_DATA_QUEUES;
407 security_idx = 0;
408 if (ieee80211_is_mgmt(hdr->frame_control))
409 security_idx = NUM_RX_DATA_QUEUES;
410 tid = 0;
411 }
412
413 rx->seqno_idx = seqno_idx;
414 rx->security_idx = security_idx;
415 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
416 * For now, set skb->priority to 0 for other cases. */
417 rx->skb->priority = (tid > 7) ? 0 : tid;
418 }
419
420 /**
421 * DOC: Packet alignment
422 *
423 * Drivers always need to pass packets that are aligned to two-byte boundaries
424 * to the stack.
425 *
426 * Additionally, drivers should, if possible, align the payload data in a way that
427 * guarantees that the contained IP header is aligned to a four-byte
428 * boundary. In the case of regular frames, this simply means aligning the
429 * payload to a four-byte boundary (because either the IP header is directly
430 * contained, or IV/RFC1042 headers that have a length divisible by four are
431 * in front of it). If the payload data is not properly aligned and the
432 * architecture doesn't support efficient unaligned operations, mac80211
433 * will align the data.
434 *
435 * With A-MSDU frames, however, the payload data address must be two modulo
436 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
437 * push the IP header further back to a multiple of four again. Thankfully, the
438 * specs were sane enough this time around to require padding each A-MSDU
439 * subframe to a length that is a multiple of four.
440 *
441 * Padding such as Atheros hardware adds between the 802.11 header and
442 * the payload is not supported; the driver is required to move the 802.11
443 * header to be directly in front of the payload in that case.
444 */
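/*
 * Worked example for the A-MSDU case: with the payload starting at an
 * address that is 2 modulo 4, the 14-byte 802.3 subframe header plus the
 * 8-byte LLC/SNAP header put the IP header 22 bytes later, i.e. back on a
 * four-byte boundary; because every subframe is padded to a multiple of
 * four, the same holds for all following subframes.
 */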
445 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
446 {
447 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
448 WARN_ONCE((unsigned long)rx->skb->data & 1,
449 "unaligned packet at 0x%p\n", rx->skb->data);
450 #endif
451 }
452
453
454 /* rx handlers */
455
456 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
457 {
458 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
459
460 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
461 return 0;
462
463 return ieee80211_is_robust_mgmt_frame(hdr);
464 }
465
466
467 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
468 {
469 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
470
471 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
472 return 0;
473
474 return ieee80211_is_robust_mgmt_frame(hdr);
475 }
476
477
478 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
479 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
480 {
481 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
482 struct ieee80211_mmie *mmie;
483
484 if (skb->len < 24 + sizeof(*mmie) ||
485 !is_multicast_ether_addr(hdr->da))
486 return -1;
487
488 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
489 return -1; /* not a robust management frame */
490
491 mmie = (struct ieee80211_mmie *)
492 (skb->data + skb->len - sizeof(*mmie));
493 if (mmie->element_id != WLAN_EID_MMIE ||
494 mmie->length != sizeof(*mmie) - 2)
495 return -1;
496
497 return le16_to_cpu(mmie->key_id);
498 }
499
500
501 static ieee80211_rx_result
502 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
503 {
504 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
505 char *dev_addr = rx->sdata->vif.addr;
506
507 if (ieee80211_is_data(hdr->frame_control)) {
508 if (is_multicast_ether_addr(hdr->addr1)) {
509 if (ieee80211_has_tods(hdr->frame_control) ||
510 !ieee80211_has_fromds(hdr->frame_control))
511 return RX_DROP_MONITOR;
512 if (ether_addr_equal(hdr->addr3, dev_addr))
513 return RX_DROP_MONITOR;
514 } else {
515 if (!ieee80211_has_a4(hdr->frame_control))
516 return RX_DROP_MONITOR;
517 if (ether_addr_equal(hdr->addr4, dev_addr))
518 return RX_DROP_MONITOR;
519 }
520 }
521
522 /* If there is not an established peer link and this is not a peer link
523 * establishment frame, beacon or probe, drop the frame.
524 */
525
526 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
527 struct ieee80211_mgmt *mgmt;
528
529 if (!ieee80211_is_mgmt(hdr->frame_control))
530 return RX_DROP_MONITOR;
531
532 if (ieee80211_is_action(hdr->frame_control)) {
533 u8 category;
534 mgmt = (struct ieee80211_mgmt *)hdr;
535 category = mgmt->u.action.category;
536 if (category != WLAN_CATEGORY_MESH_ACTION &&
537 category != WLAN_CATEGORY_SELF_PROTECTED)
538 return RX_DROP_MONITOR;
539 return RX_CONTINUE;
540 }
541
542 if (ieee80211_is_probe_req(hdr->frame_control) ||
543 ieee80211_is_probe_resp(hdr->frame_control) ||
544 ieee80211_is_beacon(hdr->frame_control) ||
545 ieee80211_is_auth(hdr->frame_control))
546 return RX_CONTINUE;
547
548 return RX_DROP_MONITOR;
549
550 }
551
552 return RX_CONTINUE;
553 }
554
555 #define SEQ_MODULO 0x1000
556 #define SEQ_MASK 0xfff
557
558 static inline int seq_less(u16 sq1, u16 sq2)
559 {
560 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
561 }
562
563 static inline u16 seq_inc(u16 sq)
564 {
565 return (sq + 1) & SEQ_MASK;
566 }
567
568 static inline u16 seq_sub(u16 sq1, u16 sq2)
569 {
570 return (sq1 - sq2) & SEQ_MASK;
571 }
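/*
 * These helpers implement modulo-4096 sequence number arithmetic; for
 * example seq_less(0xffe, 0x002) is true and seq_sub(0x002, 0xffe) is 4,
 * since 0x002 comes after 0xffe once the counter wraps.
 */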
572
573
574 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
575 struct tid_ampdu_rx *tid_agg_rx,
576 int index)
577 {
578 struct ieee80211_local *local = sdata->local;
579 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
580 struct ieee80211_rx_status *status;
581
582 lockdep_assert_held(&tid_agg_rx->reorder_lock);
583
584 if (!skb)
585 goto no_frame;
586
587 /* release the frame from the reorder ring buffer */
588 tid_agg_rx->stored_mpdu_num--;
589 tid_agg_rx->reorder_buf[index] = NULL;
590 status = IEEE80211_SKB_RXCB(skb);
591 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
592 skb_queue_tail(&local->rx_skb_queue, skb);
593
594 no_frame:
595 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
596 }
597
598 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
599 struct tid_ampdu_rx *tid_agg_rx,
600 u16 head_seq_num)
601 {
602 int index;
603
604 lockdep_assert_held(&tid_agg_rx->reorder_lock);
605
606 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
607 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
608 tid_agg_rx->buf_size;
609 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
610 }
611 }
612
613 /*
614 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
615 * the skb was added to the buffer longer than this time ago, the earlier
616 * frames that have not yet been received are assumed to be lost and the skb
617 * can be released for processing. This may also release other skb's from the
618 * reorder buffer if there are no additional gaps between the frames.
619 *
620 * Callers must hold tid_agg_rx->reorder_lock.
621 */
622 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
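/* with HZ jiffies per second, HZ / 10 is always 100 ms */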
623
624 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
625 struct tid_ampdu_rx *tid_agg_rx)
626 {
627 int index, j;
628
629 lockdep_assert_held(&tid_agg_rx->reorder_lock);
630
631 /* release the buffer until the next missing frame */
632 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
633 tid_agg_rx->buf_size;
634 if (!tid_agg_rx->reorder_buf[index] &&
635 tid_agg_rx->stored_mpdu_num) {
636 /*
637 * No buffers ready to be released, but check whether any
638 * frames in the reorder buffer have timed out.
639 */
640 int skipped = 1;
641 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
642 j = (j + 1) % tid_agg_rx->buf_size) {
643 if (!tid_agg_rx->reorder_buf[j]) {
644 skipped++;
645 continue;
646 }
647 if (skipped &&
648 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
649 HT_RX_REORDER_BUF_TIMEOUT))
650 goto set_release_timer;
651
652 ht_dbg_ratelimited(sdata,
653 "release an RX reorder frame due to timeout on earlier frames\n");
654 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
655
656 /*
657 * Increment the head seq# also for the skipped slots.
658 */
659 tid_agg_rx->head_seq_num =
660 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
661 skipped = 0;
662 }
663 } else while (tid_agg_rx->reorder_buf[index]) {
664 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
665 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
666 tid_agg_rx->buf_size;
667 }
668
669 if (tid_agg_rx->stored_mpdu_num) {
670 j = index = seq_sub(tid_agg_rx->head_seq_num,
671 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
672
673 for (; j != (index - 1) % tid_agg_rx->buf_size;
674 j = (j + 1) % tid_agg_rx->buf_size) {
675 if (tid_agg_rx->reorder_buf[j])
676 break;
677 }
678
679 set_release_timer:
680
681 mod_timer(&tid_agg_rx->reorder_timer,
682 tid_agg_rx->reorder_time[j] + 1 +
683 HT_RX_REORDER_BUF_TIMEOUT);
684 } else {
685 del_timer(&tid_agg_rx->reorder_timer);
686 }
687 }
688
689 /*
690 * As this function belongs to the RX path it must be under
691 * rcu_read_lock protection. It returns false if the frame
692 * can be processed immediately, true if it was consumed.
693 */
694 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
695 struct tid_ampdu_rx *tid_agg_rx,
696 struct sk_buff *skb)
697 {
698 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
699 u16 sc = le16_to_cpu(hdr->seq_ctrl);
700 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
701 u16 head_seq_num, buf_size;
702 int index;
703 bool ret = true;
704
705 spin_lock(&tid_agg_rx->reorder_lock);
706
707 buf_size = tid_agg_rx->buf_size;
708 head_seq_num = tid_agg_rx->head_seq_num;
709
710 /* frame with out of date sequence number */
711 if (seq_less(mpdu_seq_num, head_seq_num)) {
712 dev_kfree_skb(skb);
713 goto out;
714 }
715
716 /*
717 * If the frame sequence number exceeds our buffering window
718 * size, release some previous frames to make room for this one.
719 */
720 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
721 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
722 /* release stored frames up to new head to stack */
723 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
724 head_seq_num);
725 }
726
727 /* Now the new frame is always in the range of the reordering buffer */
728
729 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
730
731 /* check if we already stored this frame */
732 if (tid_agg_rx->reorder_buf[index]) {
733 dev_kfree_skb(skb);
734 goto out;
735 }
736
737 /*
738 * If the current MPDU is in the right order and nothing else
739 * is stored we can process it directly, no need to buffer it.
740 * If it is first but there's something stored, we may be able
741 * to release frames after this one.
742 */
743 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
744 tid_agg_rx->stored_mpdu_num == 0) {
745 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
746 ret = false;
747 goto out;
748 }
749
750 /* put the frame in the reordering buffer */
751 tid_agg_rx->reorder_buf[index] = skb;
752 tid_agg_rx->reorder_time[index] = jiffies;
753 tid_agg_rx->stored_mpdu_num++;
754 ieee80211_sta_reorder_release(sdata, tid_agg_rx);
755
756 out:
757 spin_unlock(&tid_agg_rx->reorder_lock);
758 return ret;
759 }
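/*
 * Worked example: with buf_size 64 and head_seq_num 100, the window is
 * [100, 163].  An MPDU with sequence number 180 falls outside it, so
 * head_seq_num is advanced to seq_inc(seq_sub(180, 64)) = 117, everything
 * stored up to 116 is released, and the new frame lands in the last slot
 * of the new window [117, 180].
 */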
760
761 /*
762 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
763 * need no reordering are queued on the local RX queue for processing.
764 */
765 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
766 {
767 struct sk_buff *skb = rx->skb;
768 struct ieee80211_local *local = rx->local;
769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
770 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
771 struct sta_info *sta = rx->sta;
772 struct tid_ampdu_rx *tid_agg_rx;
773 u16 sc;
774 u8 tid, ack_policy;
775
776 if (!ieee80211_is_data_qos(hdr->frame_control))
777 goto dont_reorder;
778
779 /*
780 * filter the QoS data rx stream according to
781 * STA/TID and check if this STA/TID is on aggregation
782 */
783
784 if (!sta)
785 goto dont_reorder;
786
787 ack_policy = *ieee80211_get_qos_ctl(hdr) &
788 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
789 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
790
791 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
792 if (!tid_agg_rx)
793 goto dont_reorder;
794
795 /* qos null data frames are excluded */
796 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
797 goto dont_reorder;
798
799 /* not part of a BA session */
800 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
801 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
802 goto dont_reorder;
803
804 /* not actually part of this BA session */
805 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
806 goto dont_reorder;
807
808 /* new, potentially un-ordered, ampdu frame - process it */
809
810 /* reset session timer */
811 if (tid_agg_rx->timeout)
812 tid_agg_rx->last_rx = jiffies;
813
814 /* if this mpdu is fragmented - terminate rx aggregation session */
815 sc = le16_to_cpu(hdr->seq_ctrl);
816 if (sc & IEEE80211_SCTL_FRAG) {
817 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
818 skb_queue_tail(&rx->sdata->skb_queue, skb);
819 ieee80211_queue_work(&local->hw, &rx->sdata->work);
820 return;
821 }
822
823 /*
824 * No locking needed -- we will only ever process one
825 * RX packet at a time, and thus own tid_agg_rx. All
826 * other code manipulating it needs to (and does) make
827 * sure that we cannot get to it any more before doing
828 * anything with it.
829 */
830 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
831 return;
832
833 dont_reorder:
834 skb_queue_tail(&local->rx_skb_queue, skb);
835 }
836
837 static ieee80211_rx_result debug_noinline
838 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
839 {
840 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
841 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
842
843 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
844 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
845 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
846 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
847 hdr->seq_ctrl)) {
848 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
849 rx->local->dot11FrameDuplicateCount++;
850 rx->sta->num_duplicates++;
851 }
852 return RX_DROP_UNUSABLE;
853 } else
854 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
855 }
856
857 if (unlikely(rx->skb->len < 16)) {
858 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
859 return RX_DROP_MONITOR;
860 }
861
862 /* Drop disallowed frame classes based on STA auth/assoc state;
863 * IEEE 802.11, Chap 5.5.
864 *
865 * mac80211 filters only based on association state, i.e. it drops
866 * Class 3 frames from non-associated stations. hostapd sends
867 * deauth/disassoc frames when needed. In addition, hostapd is
868 * responsible for filtering on both auth and assoc states.
869 */
870
871 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
872 return ieee80211_rx_mesh_check(rx);
873
874 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
875 ieee80211_is_pspoll(hdr->frame_control)) &&
876 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
877 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
878 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
879 /*
880 * accept port control frames from the AP even when it's not
881 * yet marked ASSOC to prevent a race where we don't set the
882 * assoc bit quickly enough before it sends the first frame
883 */
884 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
885 ieee80211_is_data_present(hdr->frame_control)) {
886 u16 ethertype;
887 u8 *payload;
888
889 payload = rx->skb->data +
890 ieee80211_hdrlen(hdr->frame_control);
891 ethertype = (payload[6] << 8) | payload[7];
892 if (cpu_to_be16(ethertype) ==
893 rx->sdata->control_port_protocol)
894 return RX_CONTINUE;
895 }
896
897 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
898 cfg80211_rx_spurious_frame(rx->sdata->dev,
899 hdr->addr2,
900 GFP_ATOMIC))
901 return RX_DROP_UNUSABLE;
902
903 return RX_DROP_MONITOR;
904 }
905
906 return RX_CONTINUE;
907 }
908
909
910 static ieee80211_rx_result debug_noinline
911 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
912 {
913 struct sk_buff *skb = rx->skb;
914 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
916 int keyidx;
917 int hdrlen;
918 ieee80211_rx_result result = RX_DROP_UNUSABLE;
919 struct ieee80211_key *sta_ptk = NULL;
920 int mmie_keyidx = -1;
921 __le16 fc;
922
923 /*
924 * Key selection 101
925 *
926 * There are four types of keys:
927 * - GTK (group keys)
928 * - IGTK (group keys for management frames)
929 * - PTK (pairwise keys)
930 * - STK (station-to-station pairwise keys)
931 *
932 * When selecting a key, we have to distinguish between multicast
933 * (including broadcast) and unicast frames, the latter can only
934 * use PTKs and STKs while the former always use GTKs and IGTKs.
935 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
936 * unicast frames can also use key indices like GTKs. Hence, if we
937 * don't have a PTK/STK we check the key index for a WEP key.
938 *
939 * Note that in a regular BSS, multicast frames are sent by the
940 * AP only, associated stations unicast the frame to the AP first
941 * which then multicasts it on their behalf.
942 *
943 * There is also a slight problem in IBSS mode: GTKs are negotiated
944 * with each station, that is something we don't currently handle.
945 * The spec seems to expect that one negotiates the same key with
946 * every station but there's no such requirement; VLANs could be
947 * possible.
948 */
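/*
 * The code below therefore tries, in order: the station's pairwise key for
 * unicast frames; the IGTK selected by the MMIE key index for BIP-protected
 * multicast management frames; for unprotected frames, merely record a
 * default/GTK key (if any) so the frame can be dropped later where
 * protection was expected; otherwise read the key index from the IV and
 * look up a per-station GTK or a default (e.g. WEP) key.
 */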
949
950 /*
951 * No point in finding a key and decrypting if the frame is neither
952 * addressed to us nor a multicast frame.
953 */
954 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
955 return RX_CONTINUE;
956
957 /* start without a key */
958 rx->key = NULL;
959
960 if (rx->sta)
961 sta_ptk = rcu_dereference(rx->sta->ptk);
962
963 fc = hdr->frame_control;
964
965 if (!ieee80211_has_protected(fc))
966 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
967
968 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
969 rx->key = sta_ptk;
970 if ((status->flag & RX_FLAG_DECRYPTED) &&
971 (status->flag & RX_FLAG_IV_STRIPPED))
972 return RX_CONTINUE;
973 /* Skip decryption if the frame is not protected. */
974 if (!ieee80211_has_protected(fc))
975 return RX_CONTINUE;
976 } else if (mmie_keyidx >= 0) {
977 /* Broadcast/multicast robust management frame / BIP */
978 if ((status->flag & RX_FLAG_DECRYPTED) &&
979 (status->flag & RX_FLAG_IV_STRIPPED))
980 return RX_CONTINUE;
981
982 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
983 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
984 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
985 if (rx->sta)
986 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
987 if (!rx->key)
988 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
989 } else if (!ieee80211_has_protected(fc)) {
990 /*
991 * The frame was not protected, so skip decryption. However, we
992 * need to set rx->key if there is a key that could have been
993 * used so that the frame may be dropped if encryption would
994 * have been expected.
995 */
996 struct ieee80211_key *key = NULL;
997 struct ieee80211_sub_if_data *sdata = rx->sdata;
998 int i;
999
1000 if (ieee80211_is_mgmt(fc) &&
1001 is_multicast_ether_addr(hdr->addr1) &&
1002 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
1003 rx->key = key;
1004 else {
1005 if (rx->sta) {
1006 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1007 key = rcu_dereference(rx->sta->gtk[i]);
1008 if (key)
1009 break;
1010 }
1011 }
1012 if (!key) {
1013 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1014 key = rcu_dereference(sdata->keys[i]);
1015 if (key)
1016 break;
1017 }
1018 }
1019 if (key)
1020 rx->key = key;
1021 }
1022 return RX_CONTINUE;
1023 } else {
1024 u8 keyid;
1025 /*
1026 * The device doesn't give us the IV so we won't be
1027 * able to look up the key. That's ok though, we
1028 * don't need to decrypt the frame, we just won't
1029 * be able to keep statistics accurate.
1030 * Except for key threshold notifications, should
1031 * we somehow allow the driver to tell us which key
1032 * the hardware used if this flag is set?
1033 */
1034 if ((status->flag & RX_FLAG_DECRYPTED) &&
1035 (status->flag & RX_FLAG_IV_STRIPPED))
1036 return RX_CONTINUE;
1037
1038 hdrlen = ieee80211_hdrlen(fc);
1039
1040 if (rx->skb->len < 8 + hdrlen)
1041 return RX_DROP_UNUSABLE; /* TODO: count this? */
1042
1043 /*
1044 * no need to call ieee80211_wep_get_keyidx,
1045 * it verifies a bunch of things we've done already
1046 */
1047 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1048 keyidx = keyid >> 6;
1049
1050 /* check per-station GTK first, if multicast packet */
1051 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1052 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1053
1054 /* if not found, try default key */
1055 if (!rx->key) {
1056 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1057
1058 /*
1059 * RSNA-protected unicast frames should always be
1060 * sent with pairwise or station-to-station keys,
1061 * but for WEP we allow using a key index as well.
1062 */
1063 if (rx->key &&
1064 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1065 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1066 !is_multicast_ether_addr(hdr->addr1))
1067 rx->key = NULL;
1068 }
1069 }
1070
1071 if (rx->key) {
1072 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1073 return RX_DROP_MONITOR;
1074
1075 rx->key->tx_rx_count++;
1076 /* TODO: add threshold stuff again */
1077 } else {
1078 return RX_DROP_MONITOR;
1079 }
1080
1081 switch (rx->key->conf.cipher) {
1082 case WLAN_CIPHER_SUITE_WEP40:
1083 case WLAN_CIPHER_SUITE_WEP104:
1084 result = ieee80211_crypto_wep_decrypt(rx);
1085 break;
1086 case WLAN_CIPHER_SUITE_TKIP:
1087 result = ieee80211_crypto_tkip_decrypt(rx);
1088 break;
1089 case WLAN_CIPHER_SUITE_CCMP:
1090 result = ieee80211_crypto_ccmp_decrypt(rx);
1091 break;
1092 case WLAN_CIPHER_SUITE_AES_CMAC:
1093 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1094 break;
1095 default:
1096 /*
1097 * We can reach here only with HW-only algorithms
1098 * but why didn't it decrypt the frame?!
1099 */
1100 return RX_DROP_UNUSABLE;
1101 }
1102
1103 /* the hdr variable is invalid after the decrypt handlers */
1104
1105 /* either the frame has been decrypted or will be dropped */
1106 status->flag |= RX_FLAG_DECRYPTED;
1107
1108 return result;
1109 }
1110
1111 static ieee80211_rx_result debug_noinline
1112 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1113 {
1114 struct ieee80211_local *local;
1115 struct ieee80211_hdr *hdr;
1116 struct sk_buff *skb;
1117
1118 local = rx->local;
1119 skb = rx->skb;
1120 hdr = (struct ieee80211_hdr *) skb->data;
1121
1122 if (!local->pspolling)
1123 return RX_CONTINUE;
1124
1125 if (!ieee80211_has_fromds(hdr->frame_control))
1126 /* this is not from AP */
1127 return RX_CONTINUE;
1128
1129 if (!ieee80211_is_data(hdr->frame_control))
1130 return RX_CONTINUE;
1131
1132 if (!ieee80211_has_moredata(hdr->frame_control)) {
1133 /* AP has no more frames buffered for us */
1134 local->pspolling = false;
1135 return RX_CONTINUE;
1136 }
1137
1138 /* more data bit is set, let's request a new frame from the AP */
1139 ieee80211_send_pspoll(local, rx->sdata);
1140
1141 return RX_CONTINUE;
1142 }
1143
1144 static void ap_sta_ps_start(struct sta_info *sta)
1145 {
1146 struct ieee80211_sub_if_data *sdata = sta->sdata;
1147 struct ieee80211_local *local = sdata->local;
1148
1149 atomic_inc(&sdata->bss->num_sta_ps);
1150 set_sta_flag(sta, WLAN_STA_PS_STA);
1151 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1152 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1153 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1154 sta->sta.addr, sta->sta.aid);
1155 }
1156
1157 static void ap_sta_ps_end(struct sta_info *sta)
1158 {
1159 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1160 sta->sta.addr, sta->sta.aid);
1161
1162 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1163 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1164 sta->sta.addr, sta->sta.aid);
1165 return;
1166 }
1167
1168 ieee80211_sta_ps_deliver_wakeup(sta);
1169 }
1170
1171 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1172 {
1173 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1174 bool in_ps;
1175
1176 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1177
1178 /* Don't let the same PS state be set twice */
1179 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1180 if ((start && in_ps) || (!start && !in_ps))
1181 return -EINVAL;
1182
1183 if (start)
1184 ap_sta_ps_start(sta_inf);
1185 else
1186 ap_sta_ps_end(sta_inf);
1187
1188 return 0;
1189 }
1190 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
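/*
 * Usage sketch (hypothetical driver code): a driver that advertises
 * IEEE80211_HW_AP_LINK_PS and tracks the PM bit in hardware or firmware
 * could report the change from its RX path roughly as
 *
 *	ret = ieee80211_sta_ps_transition(pubsta, frame_has_pm_bit);
 *
 * where "pubsta" and "frame_has_pm_bit" are placeholders; a -EINVAL return
 * just means the station was already in the requested powersave state.
 */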
1191
1192 static ieee80211_rx_result debug_noinline
1193 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1194 {
1195 struct ieee80211_sub_if_data *sdata = rx->sdata;
1196 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1197 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1198 int tid, ac;
1199
1200 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1201 return RX_CONTINUE;
1202
1203 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1204 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1205 return RX_CONTINUE;
1206
1207 /*
1208 * The device handles station powersave, so don't do anything about
1209 * uAPSD and PS-Poll frames (the latter shouldn't even be passed
1210 * up to mac80211 by the device, since it handles them.)
1211 */
1212 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1213 return RX_CONTINUE;
1214
1215 /*
1216 * Don't do anything if the station isn't already asleep. In
1217 * the uAPSD case, the station will probably be marked asleep,
1218 * in the PS-Poll case the station must be confused ...
1219 */
1220 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1221 return RX_CONTINUE;
1222
1223 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1224 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1225 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1226 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1227 else
1228 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1229 }
1230
1231 /* Free PS Poll skb here instead of returning RX_DROP that would
1232 * count as a dropped frame. */
1233 dev_kfree_skb(rx->skb);
1234
1235 return RX_QUEUED;
1236 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1237 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1238 ieee80211_has_pm(hdr->frame_control) &&
1239 (ieee80211_is_data_qos(hdr->frame_control) ||
1240 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1241 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1242 ac = ieee802_1d_to_ac[tid & 7];
1243
1244 /*
1245 * If this AC is not trigger-enabled do nothing.
1246 *
1247 * NB: This could/should check a separate bitmap of trigger-
1248 * enabled queues, but for now we only implement uAPSD w/o
1249 * TSPEC changes to the ACs, so they're always the same.
1250 */
1251 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1252 return RX_CONTINUE;
1253
1254 /* if we are in a service period, do nothing */
1255 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1256 return RX_CONTINUE;
1257
1258 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1259 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1260 else
1261 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1262 }
1263
1264 return RX_CONTINUE;
1265 }
1266
1267 static ieee80211_rx_result debug_noinline
1268 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1269 {
1270 struct sta_info *sta = rx->sta;
1271 struct sk_buff *skb = rx->skb;
1272 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1273 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1274
1275 if (!sta)
1276 return RX_CONTINUE;
1277
1278 /*
1279 * Update last_rx only for IBSS packets which are for the current
1280 * BSSID to avoid keeping the current IBSS network alive in cases
1281 * where other STAs start using different BSSID.
1282 */
1283 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1284 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1285 NL80211_IFTYPE_ADHOC);
1286 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
1287 sta->last_rx = jiffies;
1288 if (ieee80211_is_data(hdr->frame_control)) {
1289 sta->last_rx_rate_idx = status->rate_idx;
1290 sta->last_rx_rate_flag = status->flag;
1291 }
1292 }
1293 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1294 /*
1295 * Mesh beacons will update last_rx if they are found to
1296 * match the current local configuration when processed.
1297 */
1298 sta->last_rx = jiffies;
1299 if (ieee80211_is_data(hdr->frame_control)) {
1300 sta->last_rx_rate_idx = status->rate_idx;
1301 sta->last_rx_rate_flag = status->flag;
1302 }
1303 }
1304
1305 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1306 return RX_CONTINUE;
1307
1308 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1309 ieee80211_sta_rx_notify(rx->sdata, hdr);
1310
1311 sta->rx_fragments++;
1312 sta->rx_bytes += rx->skb->len;
1313 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1314 sta->last_signal = status->signal;
1315 ewma_add(&sta->avg_signal, -status->signal);
1316 }
1317
1318 /*
1319 * Change STA power saving mode only at the end of a frame
1320 * exchange sequence.
1321 */
1322 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1323 !ieee80211_has_morefrags(hdr->frame_control) &&
1324 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1325 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1326 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1327 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1328 /*
1329 * Ignore doze->wake transitions that are
1330 * indicated by non-data frames, the standard
1331 * is unclear here, but for example going to
1332 * PS mode and then scanning would cause a
1333 * doze->wake transition for the probe request,
1334 * and that is clearly undesirable.
1335 */
1336 if (ieee80211_is_data(hdr->frame_control) &&
1337 !ieee80211_has_pm(hdr->frame_control))
1338 ap_sta_ps_end(sta);
1339 } else {
1340 if (ieee80211_has_pm(hdr->frame_control))
1341 ap_sta_ps_start(sta);
1342 }
1343 }
1344
1345 /*
1346 * Drop (qos-)data::nullfunc frames silently, since they
1347 * are used only to control station power saving mode.
1348 */
1349 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1350 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1351 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1352
1353 /*
1354 * If we receive a 4-addr nullfunc frame from a STA
1355 * that has not yet been moved to a 4-addr STA VLAN, send
1356 * the event to userspace and, for older hostapd, drop
1357 * the frame to the monitor interface.
1358 */
1359 if (ieee80211_has_a4(hdr->frame_control) &&
1360 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1361 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1362 !rx->sdata->u.vlan.sta))) {
1363 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1364 cfg80211_rx_unexpected_4addr_frame(
1365 rx->sdata->dev, sta->sta.addr,
1366 GFP_ATOMIC);
1367 return RX_DROP_MONITOR;
1368 }
1369 /*
1370 * Update counter and free packet here to avoid
1371 * counting this as a dropped packet.
1372 */
1373 sta->rx_packets++;
1374 dev_kfree_skb(rx->skb);
1375 return RX_QUEUED;
1376 }
1377
1378 return RX_CONTINUE;
1379 } /* ieee80211_rx_h_sta_process */
1380
1381 static inline struct ieee80211_fragment_entry *
1382 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1383 unsigned int frag, unsigned int seq, int rx_queue,
1384 struct sk_buff **skb)
1385 {
1386 struct ieee80211_fragment_entry *entry;
1387 int idx;
1388
1389 idx = sdata->fragment_next;
1390 entry = &sdata->fragments[sdata->fragment_next++];
1391 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1392 sdata->fragment_next = 0;
1393
1394 if (!skb_queue_empty(&entry->skb_list))
1395 __skb_queue_purge(&entry->skb_list);
1396
1397 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1398 *skb = NULL;
1399 entry->first_frag_time = jiffies;
1400 entry->seq = seq;
1401 entry->rx_queue = rx_queue;
1402 entry->last_frag = frag;
1403 entry->ccmp = 0;
1404 entry->extra_len = 0;
1405
1406 return entry;
1407 }
1408
1409 static inline struct ieee80211_fragment_entry *
1410 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1411 unsigned int frag, unsigned int seq,
1412 int rx_queue, struct ieee80211_hdr *hdr)
1413 {
1414 struct ieee80211_fragment_entry *entry;
1415 int i, idx;
1416
1417 idx = sdata->fragment_next;
1418 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1419 struct ieee80211_hdr *f_hdr;
1420
1421 idx--;
1422 if (idx < 0)
1423 idx = IEEE80211_FRAGMENT_MAX - 1;
1424
1425 entry = &sdata->fragments[idx];
1426 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1427 entry->rx_queue != rx_queue ||
1428 entry->last_frag + 1 != frag)
1429 continue;
1430
1431 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1432
1433 /*
1434 * Check ftype and addresses are equal, else check next fragment
1435 */
1436 if (((hdr->frame_control ^ f_hdr->frame_control) &
1437 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1438 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1439 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1440 continue;
1441
1442 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1443 __skb_queue_purge(&entry->skb_list);
1444 continue;
1445 }
1446 return entry;
1447 }
1448
1449 return NULL;
1450 }
1451
1452 static ieee80211_rx_result debug_noinline
1453 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1454 {
1455 struct ieee80211_hdr *hdr;
1456 u16 sc;
1457 __le16 fc;
1458 unsigned int frag, seq;
1459 struct ieee80211_fragment_entry *entry;
1460 struct sk_buff *skb;
1461 struct ieee80211_rx_status *status;
1462
1463 hdr = (struct ieee80211_hdr *)rx->skb->data;
1464 fc = hdr->frame_control;
1465 sc = le16_to_cpu(hdr->seq_ctrl);
1466 frag = sc & IEEE80211_SCTL_FRAG;
1467
1468 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1469 (rx->skb)->len < 24 ||
1470 is_multicast_ether_addr(hdr->addr1))) {
1471 /* not fragmented */
1472 goto out;
1473 }
1474 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1475
1476 if (skb_linearize(rx->skb))
1477 return RX_DROP_UNUSABLE;
1478
1479 /*
1480 * skb_linearize() might change the skb->data and
1481 * previously cached variables (in this case, hdr) need to
1482 * be refreshed with the new data.
1483 */
1484 hdr = (struct ieee80211_hdr *)rx->skb->data;
1485 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1486
1487 if (frag == 0) {
1488 /* This is the first fragment of a new frame. */
1489 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1490 rx->seqno_idx, &(rx->skb));
1491 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1492 ieee80211_has_protected(fc)) {
1493 int queue = rx->security_idx;
1494 /* Store CCMP PN so that we can verify that the next
1495 * fragment has a sequential PN value. */
1496 entry->ccmp = 1;
1497 memcpy(entry->last_pn,
1498 rx->key->u.ccmp.rx_pn[queue],
1499 CCMP_PN_LEN);
1500 }
1501 return RX_QUEUED;
1502 }
1503
1504 /* This is a fragment for a frame that should already be pending in
1505 * the fragment cache. Add this fragment to the end of the pending entry.
1506 */
1507 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1508 rx->seqno_idx, hdr);
1509 if (!entry) {
1510 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1511 return RX_DROP_MONITOR;
1512 }
1513
1514 /* Verify that MPDUs within one MSDU have sequential PN values.
1515 * (IEEE 802.11i, 8.3.3.4.5) */
1516 if (entry->ccmp) {
1517 int i;
1518 u8 pn[CCMP_PN_LEN], *rpn;
1519 int queue;
1520 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1521 return RX_DROP_UNUSABLE;
1522 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1523 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1524 pn[i]++;
1525 if (pn[i])
1526 break;
1527 }
1528 queue = rx->security_idx;
1529 rpn = rx->key->u.ccmp.rx_pn[queue];
1530 if (memcmp(pn, rpn, CCMP_PN_LEN))
1531 return RX_DROP_UNUSABLE;
1532 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1533 }
1534
1535 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1536 __skb_queue_tail(&entry->skb_list, rx->skb);
1537 entry->last_frag = frag;
1538 entry->extra_len += rx->skb->len;
1539 if (ieee80211_has_morefrags(fc)) {
1540 rx->skb = NULL;
1541 return RX_QUEUED;
1542 }
1543
1544 rx->skb = __skb_dequeue(&entry->skb_list);
1545 if (skb_tailroom(rx->skb) < entry->extra_len) {
1546 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1547 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1548 GFP_ATOMIC))) {
1549 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1550 __skb_queue_purge(&entry->skb_list);
1551 return RX_DROP_UNUSABLE;
1552 }
1553 }
1554 while ((skb = __skb_dequeue(&entry->skb_list))) {
1555 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1556 dev_kfree_skb(skb);
1557 }
1558
1559 /* Complete frame has been reassembled - process it now */
1560 status = IEEE80211_SKB_RXCB(rx->skb);
1561 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1562
1563 out:
1564 if (rx->sta)
1565 rx->sta->rx_packets++;
1566 if (is_multicast_ether_addr(hdr->addr1))
1567 rx->local->dot11MulticastReceivedFrameCount++;
1568 else
1569 ieee80211_led_rx(rx->local);
1570 return RX_CONTINUE;
1571 }
1572
1573 static int
1574 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1575 {
1576 if (unlikely(!rx->sta ||
1577 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1578 return -EACCES;
1579
1580 return 0;
1581 }
1582
1583 static int
1584 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1585 {
1586 struct sk_buff *skb = rx->skb;
1587 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1588
1589 /*
1590 * Pass through unencrypted frames if the hardware has
1591 * decrypted them already.
1592 */
1593 if (status->flag & RX_FLAG_DECRYPTED)
1594 return 0;
1595
1596 /* Drop unencrypted frames if key is set. */
1597 if (unlikely(!ieee80211_has_protected(fc) &&
1598 !ieee80211_is_nullfunc(fc) &&
1599 ieee80211_is_data(fc) &&
1600 (rx->key || rx->sdata->drop_unencrypted)))
1601 return -EACCES;
1602
1603 return 0;
1604 }
1605
1606 static int
1607 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1608 {
1609 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1610 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1611 __le16 fc = hdr->frame_control;
1612
1613 /*
1614 * Pass through unencrypted frames if the hardware has
1615 * decrypted them already.
1616 */
1617 if (status->flag & RX_FLAG_DECRYPTED)
1618 return 0;
1619
1620 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1621 if (unlikely(!ieee80211_has_protected(fc) &&
1622 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1623 rx->key)) {
1624 if (ieee80211_is_deauth(fc))
1625 cfg80211_send_unprot_deauth(rx->sdata->dev,
1626 rx->skb->data,
1627 rx->skb->len);
1628 else if (ieee80211_is_disassoc(fc))
1629 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1630 rx->skb->data,
1631 rx->skb->len);
1632 return -EACCES;
1633 }
1634 /* BIP does not use the Protected field, so we need to check the MMIE */
1635 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1636 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1637 if (ieee80211_is_deauth(fc))
1638 cfg80211_send_unprot_deauth(rx->sdata->dev,
1639 rx->skb->data,
1640 rx->skb->len);
1641 else if (ieee80211_is_disassoc(fc))
1642 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1643 rx->skb->data,
1644 rx->skb->len);
1645 return -EACCES;
1646 }
1647 /*
1648 * When using MFP, Action frames are not allowed prior to
1649 * having configured keys.
1650 */
1651 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1652 ieee80211_is_robust_mgmt_frame(
1653 (struct ieee80211_hdr *) rx->skb->data)))
1654 return -EACCES;
1655 }
1656
1657 return 0;
1658 }
1659
1660 static int
1661 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1662 {
1663 struct ieee80211_sub_if_data *sdata = rx->sdata;
1664 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1665 bool check_port_control = false;
1666 struct ethhdr *ehdr;
1667 int ret;
1668
1669 *port_control = false;
1670 if (ieee80211_has_a4(hdr->frame_control) &&
1671 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1672 return -1;
1673
1674 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1675 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1676
1677 if (!sdata->u.mgd.use_4addr)
1678 return -1;
1679 else
1680 check_port_control = true;
1681 }
1682
1683 if (is_multicast_ether_addr(hdr->addr1) &&
1684 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1685 return -1;
1686
1687 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1688 if (ret < 0)
1689 return ret;
1690
1691 ehdr = (struct ethhdr *) rx->skb->data;
1692 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1693 *port_control = true;
1694 else if (check_port_control)
1695 return -1;
1696
1697 return 0;
1698 }
1699
1700 /*
1701 * requires that rx->skb is a frame with an ethernet header
1702 */
1703 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1704 {
1705 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1706 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1707 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1708
1709 /*
1710 * Allow EAPOL frames to us/the PAE group address regardless
1711 * of whether the frame was encrypted or not.
1712 */
1713 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1714 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
1715 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
1716 return true;
1717
1718 if (ieee80211_802_1x_port_control(rx) ||
1719 ieee80211_drop_unencrypted(rx, fc))
1720 return false;
1721
1722 return true;
1723 }
1724
1725 /*
1726 * requires that rx->skb is a frame with an ethernet header
1727 */
1728 static void
1729 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1730 {
1731 struct ieee80211_sub_if_data *sdata = rx->sdata;
1732 struct net_device *dev = sdata->dev;
1733 struct sk_buff *skb, *xmit_skb;
1734 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1735 struct sta_info *dsta;
1736 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1737
1738 skb = rx->skb;
1739 xmit_skb = NULL;
1740
1741 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1742 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1743 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1744 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1745 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1746 if (is_multicast_ether_addr(ehdr->h_dest)) {
1747 /*
1748 * send multicast frames both to higher layers in
1749 * local net stack and back to the wireless medium
1750 */
1751 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1752 if (!xmit_skb)
1753 net_info_ratelimited("%s: failed to clone multicast frame\n",
1754 dev->name);
1755 } else {
1756 dsta = sta_info_get(sdata, skb->data);
1757 if (dsta) {
1758 /*
1759 * The destination station is associated to
1760 * this AP (in this VLAN), so send the frame
1761 * directly to it and do not pass it to local
1762 * net stack.
1763 */
1764 xmit_skb = skb;
1765 skb = NULL;
1766 }
1767 }
1768 }
1769
1770 if (skb) {
1771 int align __maybe_unused;
1772
1773 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1774 /*
1775 * 'align' will only take the values 0 or 2 here
1776 * since all frames are required to be aligned
1777 * to 2-byte boundaries when being passed to
1778 * mac80211. That also explains the __skb_push()
1779 * below.
1780 */
1781 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1782 if (align) {
1783 if (WARN_ON(skb_headroom(skb) < 3)) {
1784 dev_kfree_skb(skb);
1785 skb = NULL;
1786 } else {
1787 u8 *data = skb->data;
1788 size_t len = skb_headlen(skb);
1789 skb->data -= align;
1790 memmove(skb->data, data, len);
1791 skb_set_tail_pointer(skb, len);
1792 }
1793 }
1794 #endif
1795
1796 if (skb) {
1797 /* deliver to local stack */
1798 skb->protocol = eth_type_trans(skb, dev);
1799 memset(skb->cb, 0, sizeof(skb->cb));
1800 netif_receive_skb(skb);
1801 }
1802 }
1803
1804 if (xmit_skb) {
1805 /*
1806 * Send to wireless media and increase priority by 256 to
1807 * keep the received priority instead of reclassifying
1808 * the frame (see cfg80211_classify8021d).
1809 */
1810 xmit_skb->priority += 256;
1811 xmit_skb->protocol = htons(ETH_P_802_3);
1812 skb_reset_network_header(xmit_skb);
1813 skb_reset_mac_header(xmit_skb);
1814 dev_queue_xmit(xmit_skb);
1815 }
1816 }
1817
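/*
 * A-MSDU handler: one received MPDU carries several MSDUs. The skb
 * is linearized, split into individual 802.3 subframes with
 * ieee80211_amsdu_to_8023s(), and each subframe that passes
 * ieee80211_frame_allowed() is delivered separately.
 */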
1818 static ieee80211_rx_result debug_noinline
1819 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1820 {
1821 struct net_device *dev = rx->sdata->dev;
1822 struct sk_buff *skb = rx->skb;
1823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1824 __le16 fc = hdr->frame_control;
1825 struct sk_buff_head frame_list;
1826 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1827
1828 if (unlikely(!ieee80211_is_data(fc)))
1829 return RX_CONTINUE;
1830
1831 if (unlikely(!ieee80211_is_data_present(fc)))
1832 return RX_DROP_MONITOR;
1833
1834 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1835 return RX_CONTINUE;
1836
1837 if (ieee80211_has_a4(hdr->frame_control) &&
1838 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1839 !rx->sdata->u.vlan.sta)
1840 return RX_DROP_UNUSABLE;
1841
1842 if (is_multicast_ether_addr(hdr->addr1) &&
1843 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1844 rx->sdata->u.vlan.sta) ||
1845 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1846 rx->sdata->u.mgd.use_4addr)))
1847 return RX_DROP_UNUSABLE;
1848
1849 skb->dev = dev;
1850 __skb_queue_head_init(&frame_list);
1851
1852 if (skb_linearize(skb))
1853 return RX_DROP_UNUSABLE;
1854
1855 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1856 rx->sdata->vif.type,
1857 rx->local->hw.extra_tx_headroom, true);
1858
1859 while (!skb_queue_empty(&frame_list)) {
1860 rx->skb = __skb_dequeue(&frame_list);
1861
1862 if (!ieee80211_frame_allowed(rx, fc)) {
1863 dev_kfree_skb(rx->skb);
1864 continue;
1865 }
1866 dev->stats.rx_packets++;
1867 dev->stats.rx_bytes += rx->skb->len;
1868
1869 ieee80211_deliver_skb(rx);
1870 }
1871
1872 return RX_QUEUED;
1873 }
1874
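/*
 * Mesh forwarding handler: checks the mesh TTL and the recent
 * multicast cache (RMC), learns proxy (MPP) paths from the address
 * extension header, and queues a copy of the frame for forwarding
 * towards the next hop when this station is not the final
 * destination and dot11MeshForwarding is enabled.
 */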
1875 #ifdef CONFIG_MAC80211_MESH
1876 static ieee80211_rx_result
1877 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1878 {
1879 struct ieee80211_hdr *fwd_hdr, *hdr;
1880 struct ieee80211_tx_info *info;
1881 struct ieee80211s_hdr *mesh_hdr;
1882 struct sk_buff *skb = rx->skb, *fwd_skb;
1883 struct ieee80211_local *local = rx->local;
1884 struct ieee80211_sub_if_data *sdata = rx->sdata;
1885 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1886 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1887 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
1888 u16 q, hdrlen;
1889
1890 hdr = (struct ieee80211_hdr *) skb->data;
1891 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1892 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1893
1894 /* frame is in RMC, don't forward */
1895 if (ieee80211_is_data(hdr->frame_control) &&
1896 is_multicast_ether_addr(hdr->addr1) &&
1897 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1898 return RX_DROP_MONITOR;
1899
1900 if (!ieee80211_is_data(hdr->frame_control))
1901 return RX_CONTINUE;
1902
1903 if (!mesh_hdr->ttl)
1904 return RX_DROP_MONITOR;
1905
1906 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1907 struct mesh_path *mppath;
1908 char *proxied_addr;
1909 char *mpp_addr;
1910
1911 if (is_multicast_ether_addr(hdr->addr1)) {
1912 mpp_addr = hdr->addr3;
1913 proxied_addr = mesh_hdr->eaddr1;
1914 } else {
1915 mpp_addr = hdr->addr4;
1916 proxied_addr = mesh_hdr->eaddr2;
1917 }
1918
1919 rcu_read_lock();
1920 mppath = mpp_path_lookup(proxied_addr, sdata);
1921 if (!mppath) {
1922 mpp_path_add(proxied_addr, mpp_addr, sdata);
1923 } else {
1924 spin_lock_bh(&mppath->state_lock);
1925 if (!ether_addr_equal(mppath->mpp, mpp_addr))
1926 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1927 spin_unlock_bh(&mppath->state_lock);
1928 }
1929 rcu_read_unlock();
1930 }
1931
1932 /* Frame has reached destination. Don't forward */
1933 if (!is_multicast_ether_addr(hdr->addr1) &&
1934 ether_addr_equal(sdata->vif.addr, hdr->addr3))
1935 return RX_CONTINUE;
1936
1937 q = ieee80211_select_queue_80211(sdata, skb, hdr);
1938 if (ieee80211_queue_stopped(&local->hw, q)) {
1939 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
1940 return RX_DROP_MONITOR;
1941 }
1942 skb_set_queue_mapping(skb, q);
1943
1944 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1945 goto out;
1946
1947 if (!--mesh_hdr->ttl) {
1948 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
1949 return RX_DROP_MONITOR;
1950 }
1951
1952 if (!ifmsh->mshcfg.dot11MeshForwarding)
1953 goto out;
1954
1955 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1956 if (!fwd_skb) {
1957 net_info_ratelimited("%s: failed to clone mesh frame\n",
1958 sdata->name);
1959 goto out;
1960 }
1961
1962 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1963 info = IEEE80211_SKB_CB(fwd_skb);
1964 memset(info, 0, sizeof(*info));
1965 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1966 info->control.vif = &rx->sdata->vif;
1967 info->control.jiffies = jiffies;
1968 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1969 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
1970 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1971 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
1972 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
1973 } else {
1974 /* unable to resolve next hop */
1975 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
1976 0, reason, fwd_hdr->addr2, sdata);
1977 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
1978 kfree_skb(fwd_skb);
1979 return RX_DROP_MONITOR;
1980 }
1981
1982 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
1983 ieee80211_add_pending_skb(local, fwd_skb);
1984 out:
1985 if (is_multicast_ether_addr(hdr->addr1) ||
1986 sdata->dev->flags & IFF_PROMISC)
1987 return RX_CONTINUE;
1988 else
1989 return RX_DROP_MONITOR;
1990 }
1991 #endif
1992
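/*
 * Data frame handler: converts the 802.11 header to an ethernet
 * header via __ieee80211_data_to_8023(), applies the 802.1X port
 * and encryption policy through ieee80211_frame_allowed(), kicks
 * the dynamic powersave timer for unicast traffic and finally
 * hands the frame to ieee80211_deliver_skb().
 */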
1993 static ieee80211_rx_result debug_noinline
1994 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1995 {
1996 struct ieee80211_sub_if_data *sdata = rx->sdata;
1997 struct ieee80211_local *local = rx->local;
1998 struct net_device *dev = sdata->dev;
1999 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2000 __le16 fc = hdr->frame_control;
2001 bool port_control;
2002 int err;
2003
2004 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2005 return RX_CONTINUE;
2006
2007 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2008 return RX_DROP_MONITOR;
2009
2010 /*
2011 * Send unexpected-4addr-frame event to hostapd. For older versions,
2012 * also drop the frame to cooked monitor interfaces.
2013 */
2014 if (ieee80211_has_a4(hdr->frame_control) &&
2015 sdata->vif.type == NL80211_IFTYPE_AP) {
2016 if (rx->sta &&
2017 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2018 cfg80211_rx_unexpected_4addr_frame(
2019 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2020 return RX_DROP_MONITOR;
2021 }
2022
2023 err = __ieee80211_data_to_8023(rx, &port_control);
2024 if (unlikely(err))
2025 return RX_DROP_UNUSABLE;
2026
2027 if (!ieee80211_frame_allowed(rx, fc))
2028 return RX_DROP_MONITOR;
2029
2030 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2031 unlikely(port_control) && sdata->bss) {
2032 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2033 u.ap);
2034 dev = sdata->dev;
2035 rx->sdata = sdata;
2036 }
2037
2038 rx->skb->dev = dev;
2039
2040 dev->stats.rx_packets++;
2041 dev->stats.rx_bytes += rx->skb->len;
2042
2043 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2044 !is_multicast_ether_addr(
2045 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2046 (!local->scanning &&
2047 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2048 mod_timer(&local->dynamic_ps_timer, jiffies +
2049 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2050 }
2051
2052 ieee80211_deliver_skb(rx);
2053
2054 return RX_QUEUED;
2055 }
2056
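/*
 * Control frame handler: only BlockAck requests (BAR) are handled
 * here; the BAR start sequence number is used to release frames
 * from the corresponding RX aggregation reorder buffer. All other
 * control frames are left for cooked monitor interfaces.
 */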
2057 static ieee80211_rx_result debug_noinline
2058 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2059 {
2060 struct sk_buff *skb = rx->skb;
2061 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2062 struct tid_ampdu_rx *tid_agg_rx;
2063 u16 start_seq_num;
2064 u16 tid;
2065
2066 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2067 return RX_CONTINUE;
2068
2069 if (ieee80211_is_back_req(bar->frame_control)) {
2070 struct {
2071 __le16 control, start_seq_num;
2072 } __packed bar_data;
2073
2074 if (!rx->sta)
2075 return RX_DROP_MONITOR;
2076
2077 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2078 &bar_data, sizeof(bar_data)))
2079 return RX_DROP_MONITOR;
2080
2081 tid = le16_to_cpu(bar_data.control) >> 12;
2082
2083 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2084 if (!tid_agg_rx)
2085 return RX_DROP_MONITOR;
2086
2087 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2088
2089 /* reset session timer */
2090 if (tid_agg_rx->timeout)
2091 mod_timer(&tid_agg_rx->session_timer,
2092 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2093
2094 spin_lock(&tid_agg_rx->reorder_lock);
2095 /* release stored frames up to start of BAR */
2096 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2097 start_seq_num);
2098 spin_unlock(&tid_agg_rx->reorder_lock);
2099
2100 kfree_skb(skb);
2101 return RX_QUEUED;
2102 }
2103
2104 /*
2105 * After this point, we only want management frames,
2106 * so we can drop all remaining control frames to
2107 * cooked monitor interfaces.
2108 */
2109 return RX_DROP_MONITOR;
2110 }
2111
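/*
 * Reply to an SA Query request (IEEE 802.11w): a unicast request
 * from the current AP is answered with an SA Query response action
 * frame that echoes the transaction identifier.
 */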
2112 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2113 struct ieee80211_mgmt *mgmt,
2114 size_t len)
2115 {
2116 struct ieee80211_local *local = sdata->local;
2117 struct sk_buff *skb;
2118 struct ieee80211_mgmt *resp;
2119
2120 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2121 		/* Not addressed to our own unicast address */
2122 return;
2123 }
2124
2125 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2126 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2127 /* Not from the current AP or not associated yet. */
2128 return;
2129 }
2130
2131 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2132 /* Too short SA Query request frame */
2133 return;
2134 }
2135
2136 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2137 if (skb == NULL)
2138 return;
2139
2140 skb_reserve(skb, local->hw.extra_tx_headroom);
2141 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2142 memset(resp, 0, 24);
2143 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2144 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2145 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2146 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2147 IEEE80211_STYPE_ACTION);
2148 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2149 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2150 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2151 memcpy(resp->u.action.u.sa_query.trans_id,
2152 mgmt->u.action.u.sa_query.trans_id,
2153 WLAN_SA_QUERY_TR_ID_LEN);
2154
2155 ieee80211_tx_skb(sdata, skb);
2156 }
2157
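/*
 * Basic management frame checks: too-short or non-management
 * frames are dropped, overlapping-BSS beacons are reported to
 * cfg80211 on AP interfaces, and frames that do not match the RA
 * or that should have been protected are discarded.
 */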
2158 static ieee80211_rx_result debug_noinline
2159 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2160 {
2161 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2162 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2163
2164 /*
2165 * From here on, look only at management frames.
2166 * Data and control frames are already handled,
2167 * and unknown (reserved) frames are useless.
2168 */
2169 if (rx->skb->len < 24)
2170 return RX_DROP_MONITOR;
2171
2172 if (!ieee80211_is_mgmt(mgmt->frame_control))
2173 return RX_DROP_MONITOR;
2174
2175 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2176 ieee80211_is_beacon(mgmt->frame_control) &&
2177 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2178 int sig = 0;
2179
2180 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2181 sig = status->signal;
2182
2183 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2184 rx->skb->data, rx->skb->len,
2185 status->freq, sig, GFP_ATOMIC);
2186 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2187 }
2188
2189 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2190 return RX_DROP_MONITOR;
2191
2192 if (ieee80211_drop_unencrypted_mgmt(rx))
2193 return RX_DROP_UNUSABLE;
2194
2195 return RX_CONTINUE;
2196 }
2197
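/*
 * Action frame handler: validates the length and category, handles
 * some frames directly (e.g. HT SM power save updates, spectrum
 * measurement requests, SA Query requests), queues BlockAck,
 * channel switch, mesh peering and mesh path selection frames to
 * the interface work, and marks malformed frames so later handlers
 * can bounce them back to the sender.
 */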
2198 static ieee80211_rx_result debug_noinline
2199 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2200 {
2201 struct ieee80211_local *local = rx->local;
2202 struct ieee80211_sub_if_data *sdata = rx->sdata;
2203 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2204 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2205 int len = rx->skb->len;
2206
2207 if (!ieee80211_is_action(mgmt->frame_control))
2208 return RX_CONTINUE;
2209
2210 /* drop too small frames */
2211 if (len < IEEE80211_MIN_ACTION_SIZE)
2212 return RX_DROP_UNUSABLE;
2213
2214 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2215 return RX_DROP_UNUSABLE;
2216
2217 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2218 return RX_DROP_UNUSABLE;
2219
2220 switch (mgmt->u.action.category) {
2221 case WLAN_CATEGORY_HT:
2222 /* reject HT action frames from stations not supporting HT */
2223 if (!rx->sta->sta.ht_cap.ht_supported)
2224 goto invalid;
2225
2226 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2227 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2228 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2229 sdata->vif.type != NL80211_IFTYPE_AP &&
2230 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2231 break;
2232
2233 /* verify action & smps_control are present */
2234 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2235 goto invalid;
2236
2237 switch (mgmt->u.action.u.ht_smps.action) {
2238 case WLAN_HT_ACTION_SMPS: {
2239 struct ieee80211_supported_band *sband;
2240 u8 smps;
2241
2242 /* convert to HT capability */
2243 switch (mgmt->u.action.u.ht_smps.smps_control) {
2244 case WLAN_HT_SMPS_CONTROL_DISABLED:
2245 smps = WLAN_HT_CAP_SM_PS_DISABLED;
2246 break;
2247 case WLAN_HT_SMPS_CONTROL_STATIC:
2248 smps = WLAN_HT_CAP_SM_PS_STATIC;
2249 break;
2250 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2251 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
2252 break;
2253 default:
2254 goto invalid;
2255 }
2256 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
2257
2258 /* if no change do nothing */
2259 if ((rx->sta->sta.ht_cap.cap &
2260 IEEE80211_HT_CAP_SM_PS) == smps)
2261 goto handled;
2262
2263 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
2264 rx->sta->sta.ht_cap.cap |= smps;
2265
2266 sband = rx->local->hw.wiphy->bands[status->band];
2267
2268 rate_control_rate_update(local, sband, rx->sta,
2269 IEEE80211_RC_SMPS_CHANGED);
2270 goto handled;
2271 }
2272 default:
2273 goto invalid;
2274 }
2275
2276 break;
2277 case WLAN_CATEGORY_BACK:
2278 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2279 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2280 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2281 sdata->vif.type != NL80211_IFTYPE_AP &&
2282 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2283 break;
2284
2285 /* verify action_code is present */
2286 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2287 break;
2288
2289 switch (mgmt->u.action.u.addba_req.action_code) {
2290 case WLAN_ACTION_ADDBA_REQ:
2291 if (len < (IEEE80211_MIN_ACTION_SIZE +
2292 sizeof(mgmt->u.action.u.addba_req)))
2293 goto invalid;
2294 break;
2295 case WLAN_ACTION_ADDBA_RESP:
2296 if (len < (IEEE80211_MIN_ACTION_SIZE +
2297 sizeof(mgmt->u.action.u.addba_resp)))
2298 goto invalid;
2299 break;
2300 case WLAN_ACTION_DELBA:
2301 if (len < (IEEE80211_MIN_ACTION_SIZE +
2302 sizeof(mgmt->u.action.u.delba)))
2303 goto invalid;
2304 break;
2305 default:
2306 goto invalid;
2307 }
2308
2309 goto queue;
2310 case WLAN_CATEGORY_SPECTRUM_MGMT:
2311 if (status->band != IEEE80211_BAND_5GHZ)
2312 break;
2313
2314 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2315 break;
2316
2317 /* verify action_code is present */
2318 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2319 break;
2320
2321 switch (mgmt->u.action.u.measurement.action_code) {
2322 case WLAN_ACTION_SPCT_MSR_REQ:
2323 if (len < (IEEE80211_MIN_ACTION_SIZE +
2324 sizeof(mgmt->u.action.u.measurement)))
2325 break;
2326 ieee80211_process_measurement_req(sdata, mgmt, len);
2327 goto handled;
2328 case WLAN_ACTION_SPCT_CHL_SWITCH:
2329 if (len < (IEEE80211_MIN_ACTION_SIZE +
2330 sizeof(mgmt->u.action.u.chan_switch)))
2331 break;
2332
2333 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2334 break;
2335
2336 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2337 break;
2338
2339 goto queue;
2340 }
2341 break;
2342 case WLAN_CATEGORY_SA_QUERY:
2343 if (len < (IEEE80211_MIN_ACTION_SIZE +
2344 sizeof(mgmt->u.action.u.sa_query)))
2345 break;
2346
2347 switch (mgmt->u.action.u.sa_query.action) {
2348 case WLAN_ACTION_SA_QUERY_REQUEST:
2349 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2350 break;
2351 ieee80211_process_sa_query_req(sdata, mgmt, len);
2352 goto handled;
2353 }
2354 break;
2355 case WLAN_CATEGORY_SELF_PROTECTED:
2356 switch (mgmt->u.action.u.self_prot.action_code) {
2357 case WLAN_SP_MESH_PEERING_OPEN:
2358 case WLAN_SP_MESH_PEERING_CLOSE:
2359 case WLAN_SP_MESH_PEERING_CONFIRM:
2360 if (!ieee80211_vif_is_mesh(&sdata->vif))
2361 goto invalid;
2362 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2363 /* userspace handles this frame */
2364 break;
2365 goto queue;
2366 case WLAN_SP_MGK_INFORM:
2367 case WLAN_SP_MGK_ACK:
2368 if (!ieee80211_vif_is_mesh(&sdata->vif))
2369 goto invalid;
2370 break;
2371 }
2372 break;
2373 case WLAN_CATEGORY_MESH_ACTION:
2374 if (!ieee80211_vif_is_mesh(&sdata->vif))
2375 break;
2376 if (mesh_action_is_path_sel(mgmt) &&
2377 (!mesh_path_sel_is_hwmp(sdata)))
2378 break;
2379 goto queue;
2380 }
2381
2382 return RX_CONTINUE;
2383
2384 invalid:
2385 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2386 /* will return in the next handlers */
2387 return RX_CONTINUE;
2388
2389 handled:
2390 if (rx->sta)
2391 rx->sta->rx_packets++;
2392 dev_kfree_skb(rx->skb);
2393 return RX_QUEUED;
2394
2395 queue:
2396 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2397 skb_queue_tail(&sdata->skb_queue, rx->skb);
2398 ieee80211_queue_work(&local->hw, &sdata->work);
2399 if (rx->sta)
2400 rx->sta->rx_packets++;
2401 return RX_QUEUED;
2402 }
2403
2404 static ieee80211_rx_result debug_noinline
2405 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2406 {
2407 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2408 int sig = 0;
2409
2410 /* skip known-bad action frames and return them in the next handler */
2411 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2412 return RX_CONTINUE;
2413
2414 /*
2415 * Getting here means the kernel doesn't know how to handle
2416 * it, but maybe userspace does ... include returned frames
2417 	 * so userspace can register for those to know whether frames
2418 	 * it transmitted were processed or returned.
2419 */
2420
2421 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2422 sig = status->signal;
2423
2424 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
2425 rx->skb->data, rx->skb->len,
2426 GFP_ATOMIC)) {
2427 if (rx->sta)
2428 rx->sta->rx_packets++;
2429 dev_kfree_skb(rx->skb);
2430 return RX_QUEUED;
2431 }
2432
2434 return RX_CONTINUE;
2435 }
2436
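/*
 * Return unhandled action frames to their sender with the 0x80 bit
 * set in the category field (802.11-2012 9.24.4). In AP modes
 * frames not marked malformed are left to hostapd via cooked
 * monitor instead, and multicast or already-rejected frames are
 * never returned.
 */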
2437 static ieee80211_rx_result debug_noinline
2438 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2439 {
2440 struct ieee80211_local *local = rx->local;
2441 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2442 struct sk_buff *nskb;
2443 struct ieee80211_sub_if_data *sdata = rx->sdata;
2444 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2445
2446 if (!ieee80211_is_action(mgmt->frame_control))
2447 return RX_CONTINUE;
2448
2449 /*
2450 * For AP mode, hostapd is responsible for handling any action
2451 * frames that we didn't handle, including returning unknown
2452 * ones. For all other modes we will return them to the sender,
2453 * setting the 0x80 bit in the action category, as required by
2454 * 802.11-2012 9.24.4.
2455 * Newer versions of hostapd shall also use the management frame
2456 * registration mechanisms, but older ones still use cooked
2457 * monitor interfaces so push all frames there.
2458 */
2459 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2460 (sdata->vif.type == NL80211_IFTYPE_AP ||
2461 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2462 return RX_DROP_MONITOR;
2463
2464 if (is_multicast_ether_addr(mgmt->da))
2465 return RX_DROP_MONITOR;
2466
2467 /* do not return rejected action frames */
2468 if (mgmt->u.action.category & 0x80)
2469 return RX_DROP_UNUSABLE;
2470
2471 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2472 GFP_ATOMIC);
2473 if (nskb) {
2474 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2475
2476 nmgmt->u.action.category |= 0x80;
2477 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2478 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2479
2480 memset(nskb->cb, 0, sizeof(nskb->cb));
2481
2482 ieee80211_tx_skb(rx->sdata, nskb);
2483 }
2484 dev_kfree_skb(rx->skb);
2485 return RX_QUEUED;
2486 }
2487
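/*
 * Remaining management frames of interest are queued on
 * sdata->skb_queue and processed by the interface work so the
 * MLME/IBSS/mesh state machines run in process context rather
 * than in the RX path.
 */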
2488 static ieee80211_rx_result debug_noinline
2489 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2490 {
2491 struct ieee80211_sub_if_data *sdata = rx->sdata;
2492 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2493 __le16 stype;
2494
2495 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2496
2497 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2498 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2499 sdata->vif.type != NL80211_IFTYPE_STATION)
2500 return RX_DROP_MONITOR;
2501
2502 switch (stype) {
2503 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2504 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2505 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2506 /* process for all: mesh, mlme, ibss */
2507 break;
2508 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2509 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2510 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2511 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2512 if (is_multicast_ether_addr(mgmt->da) &&
2513 !is_broadcast_ether_addr(mgmt->da))
2514 return RX_DROP_MONITOR;
2515
2516 /* process only for station */
2517 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2518 return RX_DROP_MONITOR;
2519 break;
2520 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2521 /* process only for ibss */
2522 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2523 return RX_DROP_MONITOR;
2524 break;
2525 default:
2526 return RX_DROP_MONITOR;
2527 }
2528
2529 /* queue up frame and kick off work to process it */
2530 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2531 skb_queue_tail(&sdata->skb_queue, rx->skb);
2532 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2533 if (rx->sta)
2534 rx->sta->rx_packets++;
2535
2536 return RX_QUEUED;
2537 }
2538
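/*
 * Hand frames that fell out of the handler chain to "cooked"
 * monitor interfaces: a radiotap header is prepended and the frame
 * is duplicated to every running monitor interface that has
 * MONITOR_FLAG_COOK_FRAMES set.
 */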
2539 /* TODO: use IEEE80211_RX_FRAGMENTED */
2540 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2541 struct ieee80211_rate *rate)
2542 {
2543 struct ieee80211_sub_if_data *sdata;
2544 struct ieee80211_local *local = rx->local;
2545 struct sk_buff *skb = rx->skb, *skb2;
2546 struct net_device *prev_dev = NULL;
2547 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2548 int needed_headroom;
2549
2550 /*
2551 * If cooked monitor has been processed already, then
2552 * don't do it again. If not, set the flag.
2553 */
2554 if (rx->flags & IEEE80211_RX_CMNTR)
2555 goto out_free_skb;
2556 rx->flags |= IEEE80211_RX_CMNTR;
2557
2558 /* If there are no cooked monitor interfaces, just free the SKB */
2559 if (!local->cooked_mntrs)
2560 goto out_free_skb;
2561
2562 /* room for the radiotap header based on driver features */
2563 needed_headroom = ieee80211_rx_radiotap_len(local, status);
2564
2565 if (skb_headroom(skb) < needed_headroom &&
2566 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
2567 goto out_free_skb;
2568
2569 /* prepend radiotap information */
2570 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2571 false);
2572
2573 skb_set_mac_header(skb, 0);
2574 skb->ip_summed = CHECKSUM_UNNECESSARY;
2575 skb->pkt_type = PACKET_OTHERHOST;
2576 skb->protocol = htons(ETH_P_802_2);
2577
2578 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2579 if (!ieee80211_sdata_running(sdata))
2580 continue;
2581
2582 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2583 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2584 continue;
2585
2586 if (prev_dev) {
2587 skb2 = skb_clone(skb, GFP_ATOMIC);
2588 if (skb2) {
2589 skb2->dev = prev_dev;
2590 netif_receive_skb(skb2);
2591 }
2592 }
2593
2594 prev_dev = sdata->dev;
2595 sdata->dev->stats.rx_packets++;
2596 sdata->dev->stats.rx_bytes += skb->len;
2597 }
2598
2599 if (prev_dev) {
2600 skb->dev = prev_dev;
2601 netif_receive_skb(skb);
2602 return;
2603 }
2604
2605 out_free_skb:
2606 dev_kfree_skb(skb);
2607 }
2608
2609 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2610 ieee80211_rx_result res)
2611 {
2612 switch (res) {
2613 case RX_DROP_MONITOR:
2614 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2615 if (rx->sta)
2616 rx->sta->rx_dropped++;
2617 /* fall through */
2618 case RX_CONTINUE: {
2619 struct ieee80211_rate *rate = NULL;
2620 struct ieee80211_supported_band *sband;
2621 struct ieee80211_rx_status *status;
2622
2623 status = IEEE80211_SKB_RXCB((rx->skb));
2624
2625 sband = rx->local->hw.wiphy->bands[status->band];
2626 if (!(status->flag & RX_FLAG_HT))
2627 rate = &sband->bitrates[status->rate_idx];
2628
2629 ieee80211_rx_cooked_monitor(rx, rate);
2630 break;
2631 }
2632 case RX_DROP_UNUSABLE:
2633 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2634 if (rx->sta)
2635 rx->sta->rx_dropped++;
2636 dev_kfree_skb(rx->skb);
2637 break;
2638 case RX_QUEUED:
2639 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2640 break;
2641 }
2642 }
2643
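/*
 * Run the per-MPDU handler chain on every frame queued on
 * local->rx_skb_queue. Each CALL_RXH(h) invocation below expands,
 * per the macro definition, roughly to:
 *
 *	res = h(rx);
 *	if (res != RX_CONTINUE)
 *		goto rxh_next;
 *
 * so the first handler that does not return RX_CONTINUE decides
 * the fate of the frame via ieee80211_rx_handlers_result().
 */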
2644 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2645 {
2646 ieee80211_rx_result res = RX_DROP_MONITOR;
2647 struct sk_buff *skb;
2648
2649 #define CALL_RXH(rxh) \
2650 do { \
2651 res = rxh(rx); \
2652 if (res != RX_CONTINUE) \
2653 goto rxh_next; \
2654 } while (0);
2655
2656 spin_lock(&rx->local->rx_skb_queue.lock);
2657 if (rx->local->running_rx_handler)
2658 goto unlock;
2659
2660 rx->local->running_rx_handler = true;
2661
2662 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2663 spin_unlock(&rx->local->rx_skb_queue.lock);
2664
2665 /*
2666 * all the other fields are valid across frames
2667 * that belong to an aMPDU since they are on the
2668 * same TID from the same station
2669 */
2670 rx->skb = skb;
2671
2672 CALL_RXH(ieee80211_rx_h_decrypt)
2673 CALL_RXH(ieee80211_rx_h_check_more_data)
2674 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2675 CALL_RXH(ieee80211_rx_h_sta_process)
2676 CALL_RXH(ieee80211_rx_h_defragment)
2677 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2678 /* must be after MMIC verify so header is counted in MPDU mic */
2679 #ifdef CONFIG_MAC80211_MESH
2680 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2681 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2682 #endif
2683 CALL_RXH(ieee80211_rx_h_amsdu)
2684 CALL_RXH(ieee80211_rx_h_data)
2685 CALL_RXH(ieee80211_rx_h_ctrl);
2686 CALL_RXH(ieee80211_rx_h_mgmt_check)
2687 CALL_RXH(ieee80211_rx_h_action)
2688 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2689 CALL_RXH(ieee80211_rx_h_action_return)
2690 CALL_RXH(ieee80211_rx_h_mgmt)
2691
2692 rxh_next:
2693 ieee80211_rx_handlers_result(rx, res);
2694 spin_lock(&rx->local->rx_skb_queue.lock);
2695 #undef CALL_RXH
2696 }
2697
2698 rx->local->running_rx_handler = false;
2699
2700 unlock:
2701 spin_unlock(&rx->local->rx_skb_queue.lock);
2702 }
2703
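/*
 * Entry point of the handler chain for a freshly received frame:
 * run the initial acceptance check, let the A-MPDU reorder code
 * queue up releasable frames and then process the RX queue through
 * ieee80211_rx_handlers().
 */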
2704 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2705 {
2706 ieee80211_rx_result res = RX_DROP_MONITOR;
2707
2708 #define CALL_RXH(rxh) \
2709 do { \
2710 res = rxh(rx); \
2711 if (res != RX_CONTINUE) \
2712 goto rxh_next; \
2713 } while (0);
2714
2715 CALL_RXH(ieee80211_rx_h_check)
2716
2717 ieee80211_rx_reorder_ampdu(rx);
2718
2719 ieee80211_rx_handlers(rx);
2720 return;
2721
2722 rxh_next:
2723 ieee80211_rx_handlers_result(rx, res);
2724
2725 #undef CALL_RXH
2726 }
2727
2728 /*
2729 * This function makes calls into the RX path, therefore
2730 * it has to be invoked under RCU read lock.
2731 */
2732 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2733 {
2734 struct ieee80211_rx_data rx = {
2735 .sta = sta,
2736 .sdata = sta->sdata,
2737 .local = sta->local,
2738 /* This is OK -- must be QoS data frame */
2739 .security_idx = tid,
2740 .seqno_idx = tid,
2741 .flags = 0,
2742 };
2743 struct tid_ampdu_rx *tid_agg_rx;
2744
2745 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2746 if (!tid_agg_rx)
2747 return;
2748
2749 spin_lock(&tid_agg_rx->reorder_lock);
2750 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
2751 spin_unlock(&tid_agg_rx->reorder_lock);
2752
2753 ieee80211_rx_handlers(&rx);
2754 }
2755
2756 /* main receive path */
2757
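/*
 * Decide whether a frame is of interest to this interface based on
 * the interface type (station, IBSS, mesh, AP/VLAN, WDS, P2P
 * device) and the receiver/BSSID addresses. Returns 0 to ignore
 * the frame; frames accepted only promiscuously have
 * IEEE80211_RX_RA_MATCH cleared so later handlers know the RA did
 * not match this interface.
 */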
2758 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2759 struct ieee80211_hdr *hdr)
2760 {
2761 struct ieee80211_sub_if_data *sdata = rx->sdata;
2762 struct sk_buff *skb = rx->skb;
2763 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2764 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2765 int multicast = is_multicast_ether_addr(hdr->addr1);
2766
2767 switch (sdata->vif.type) {
2768 case NL80211_IFTYPE_STATION:
2769 if (!bssid && !sdata->u.mgd.use_4addr)
2770 return 0;
2771 if (!multicast &&
2772 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2773 if (!(sdata->dev->flags & IFF_PROMISC) ||
2774 sdata->u.mgd.use_4addr)
2775 return 0;
2776 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2777 }
2778 break;
2779 case NL80211_IFTYPE_ADHOC:
2780 if (!bssid)
2781 return 0;
2782 if (ieee80211_is_beacon(hdr->frame_control)) {
2783 return 1;
2784 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2785 return 0;
2786 } else if (!multicast &&
2787 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2788 if (!(sdata->dev->flags & IFF_PROMISC))
2789 return 0;
2790 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2791 } else if (!rx->sta) {
2792 int rate_idx;
2793 if (status->flag & RX_FLAG_HT)
2794 rate_idx = 0; /* TODO: HT rates */
2795 else
2796 rate_idx = status->rate_idx;
2797 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
2798 BIT(rate_idx));
2799 }
2800 break;
2801 case NL80211_IFTYPE_MESH_POINT:
2802 if (!multicast &&
2803 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2804 if (!(sdata->dev->flags & IFF_PROMISC))
2805 return 0;
2806
2807 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2808 }
2809 break;
2810 case NL80211_IFTYPE_AP_VLAN:
2811 case NL80211_IFTYPE_AP:
2812 if (!bssid) {
2813 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2814 return 0;
2815 } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
2816 /*
2817 * Accept public action frames even when the
2818 * BSSID doesn't match, this is used for P2P
2819 * and location updates. Note that mac80211
2820 * itself never looks at these frames.
2821 */
2822 if (ieee80211_is_public_action(hdr, skb->len))
2823 return 1;
2824 if (!ieee80211_is_beacon(hdr->frame_control))
2825 return 0;
2826 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2827 }
2828 break;
2829 case NL80211_IFTYPE_WDS:
2830 if (bssid || !ieee80211_is_data(hdr->frame_control))
2831 return 0;
2832 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2833 return 0;
2834 break;
2835 case NL80211_IFTYPE_P2P_DEVICE:
2836 if (!ieee80211_is_public_action(hdr, skb->len) &&
2837 !ieee80211_is_probe_req(hdr->frame_control) &&
2838 !ieee80211_is_probe_resp(hdr->frame_control) &&
2839 !ieee80211_is_beacon(hdr->frame_control))
2840 return 0;
2841 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2842 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2843 break;
2844 default:
2845 /* should never get here */
2846 WARN_ON_ONCE(1);
2847 break;
2848 }
2849
2850 return 1;
2851 }
2852
2853 /*
2854 * This function returns whether or not the SKB
2855  * was destined for RX processing, which,
2856 * if consume is true, is equivalent to whether
2857 * or not the skb was consumed.
2858 */
2859 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2860 struct sk_buff *skb, bool consume)
2861 {
2862 struct ieee80211_local *local = rx->local;
2863 struct ieee80211_sub_if_data *sdata = rx->sdata;
2864 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2865 struct ieee80211_hdr *hdr = (void *)skb->data;
2866 int prepares;
2867
2868 rx->skb = skb;
2869 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2870 prepares = prepare_for_handlers(rx, hdr);
2871
2872 if (!prepares)
2873 return false;
2874
2875 if (!consume) {
2876 skb = skb_copy(skb, GFP_ATOMIC);
2877 if (!skb) {
2878 if (net_ratelimit())
2879 wiphy_debug(local->hw.wiphy,
2880 "failed to copy skb for %s\n",
2881 sdata->name);
2882 return true;
2883 }
2884
2885 rx->skb = skb;
2886 }
2887
2888 ieee80211_invoke_rx_handlers(rx);
2889 return true;
2890 }
2891
2892 /*
2893  * This is the actual Rx frames handler. As it belongs to the Rx path it must
2894  * be called with rcu_read_lock protection.
2895 */
2896 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2897 struct sk_buff *skb)
2898 {
2899 struct ieee80211_local *local = hw_to_local(hw);
2900 struct ieee80211_sub_if_data *sdata;
2901 struct ieee80211_hdr *hdr;
2902 __le16 fc;
2903 struct ieee80211_rx_data rx;
2904 struct ieee80211_sub_if_data *prev;
2905 struct sta_info *sta, *tmp, *prev_sta;
2906 int err = 0;
2907
2908 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2909 memset(&rx, 0, sizeof(rx));
2910 rx.skb = skb;
2911 rx.local = local;
2912
2913 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2914 local->dot11ReceivedFragmentCount++;
2915
2916 if (ieee80211_is_mgmt(fc))
2917 err = skb_linearize(skb);
2918 else
2919 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2920
2921 if (err) {
2922 dev_kfree_skb(skb);
2923 return;
2924 }
2925
2926 hdr = (struct ieee80211_hdr *)skb->data;
2927 ieee80211_parse_qos(&rx);
2928 ieee80211_verify_alignment(&rx);
2929
2930 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
2931 ieee80211_is_beacon(hdr->frame_control)))
2932 ieee80211_scan_rx(local, skb);
2933
2934 if (ieee80211_is_data(fc)) {
2935 prev_sta = NULL;
2936
2937 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2938 if (!prev_sta) {
2939 prev_sta = sta;
2940 continue;
2941 }
2942
2943 rx.sta = prev_sta;
2944 rx.sdata = prev_sta->sdata;
2945 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2946
2947 prev_sta = sta;
2948 }
2949
2950 if (prev_sta) {
2951 rx.sta = prev_sta;
2952 rx.sdata = prev_sta->sdata;
2953
2954 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2955 return;
2956 goto out;
2957 }
2958 }
2959
2960 prev = NULL;
2961
2962 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2963 if (!ieee80211_sdata_running(sdata))
2964 continue;
2965
2966 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2967 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2968 continue;
2969
2970 /*
2971 * frame is destined for this interface, but if it's
2972 * not also for the previous one we handle that after
2973 * the loop to avoid copying the SKB once too much
2974 */
2975
2976 if (!prev) {
2977 prev = sdata;
2978 continue;
2979 }
2980
2981 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2982 rx.sdata = prev;
2983 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2984
2985 prev = sdata;
2986 }
2987
2988 if (prev) {
2989 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2990 rx.sdata = prev;
2991
2992 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2993 return;
2994 }
2995
2996 out:
2997 dev_kfree_skb(skb);
2998 }
2999
3000 /*
3001 * This is the receive path handler. It is called by a low level driver when an
3002 * 802.11 MPDU is received from the hardware.
3003 */
3004 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3005 {
3006 struct ieee80211_local *local = hw_to_local(hw);
3007 struct ieee80211_rate *rate = NULL;
3008 struct ieee80211_supported_band *sband;
3009 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3010
3011 WARN_ON_ONCE(softirq_count() == 0);
3012
3013 if (WARN_ON(status->band < 0 ||
3014 status->band >= IEEE80211_NUM_BANDS))
3015 goto drop;
3016
3017 sband = local->hw.wiphy->bands[status->band];
3018 if (WARN_ON(!sband))
3019 goto drop;
3020
3021 /*
3022 * If we're suspending, it is possible although not too likely
3023 * that we'd be receiving frames after having already partially
3024 * quiesced the stack. We can't process such frames then since
3025 * that might, for example, cause stations to be added or other
3026 * driver callbacks be invoked.
3027 */
3028 if (unlikely(local->quiescing || local->suspended))
3029 goto drop;
3030
3031 /* We might be during a HW reconfig, prevent Rx for the same reason */
3032 if (unlikely(local->in_reconfig))
3033 goto drop;
3034
3035 /*
3036 * The same happens when we're not even started,
3037 * but that's worth a warning.
3038 */
3039 if (WARN_ON(!local->started))
3040 goto drop;
3041
3042 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
3043 /*
3044 * Validate the rate, unless a PLCP error means that
3045 * we probably can't have a valid rate here anyway.
3046 */
3047
3048 if (status->flag & RX_FLAG_HT) {
3049 /*
3050 * rate_idx is MCS index, which can be [0-76]
3051 * as documented on:
3052 *
3053 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3054 *
3055 * Anything else would be some sort of driver or
3056 * hardware error. The driver should catch hardware
3057 * errors.
3058 */
3059 if (WARN((status->rate_idx < 0 ||
3060 status->rate_idx > 76),
3061 "Rate marked as an HT rate but passed "
3062 "status->rate_idx is not "
3063 "an MCS index [0-76]: %d (0x%02x)\n",
3064 status->rate_idx,
3065 status->rate_idx))
3066 goto drop;
3067 } else {
3068 if (WARN_ON(status->rate_idx < 0 ||
3069 status->rate_idx >= sband->n_bitrates))
3070 goto drop;
3071 rate = &sband->bitrates[status->rate_idx];
3072 }
3073 }
3074
3075 status->rx_flags = 0;
3076
3077 /*
3078 * key references and virtual interfaces are protected using RCU
3079 * and this requires that we are in a read-side RCU section during
3080 * receive processing
3081 */
3082 rcu_read_lock();
3083
3084 /*
3085 * Frames with failed FCS/PLCP checksum are not returned,
3086 * all other frames are returned without radiotap header
3087 * if it was previously present.
3088 * Also, frames with less than 16 bytes are dropped.
3089 */
3090 skb = ieee80211_rx_monitor(local, skb, rate);
3091 if (!skb) {
3092 rcu_read_unlock();
3093 return;
3094 }
3095
3096 ieee80211_tpt_led_trig_rx(local,
3097 ((struct ieee80211_hdr *)skb->data)->frame_control,
3098 skb->len);
3099 __ieee80211_rx_handle_packet(hw, skb);
3100
3101 rcu_read_unlock();
3102
3103 return;
3104 drop:
3105 kfree_skb(skb);
3106 }
3107 EXPORT_SYMBOL(ieee80211_rx);
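/*
 * A minimal sketch of how a low level driver typically hands a
 * received MPDU to mac80211 (the my_hw pointer and the values
 * filled in below are illustrative, not taken from any real
 * driver): the driver fills the RX status in the skb control
 * buffer and then calls ieee80211_rx() from BH/softirq context,
 * or ieee80211_rx_irqsafe() from hard interrupt context.
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = IEEE80211_BAND_2GHZ;
 *	status->freq = 2412;
 *	status->rate_idx = 0;
 *	status->signal = -42;
 *	ieee80211_rx(my_hw, skb);
 */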
3108
3109 /* This is a version of the rx handler that can be called from hard irq
3110 * context. Post the skb on the queue and schedule the tasklet */
3111 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3112 {
3113 struct ieee80211_local *local = hw_to_local(hw);
3114
3115 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3116
3117 skb->pkt_type = IEEE80211_RX_MSG;
3118 skb_queue_tail(&local->skb_queue, skb);
3119 tasklet_schedule(&local->tasklet);
3120 }
3121 EXPORT_SYMBOL(ieee80211_rx_irqsafe);