mac80211: report OBSS beacons
net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h>
21
22 #include "ieee80211_i.h"
23 #include "driver-ops.h"
24 #include "led.h"
25 #include "mesh.h"
26 #include "wep.h"
27 #include "wpa.h"
28 #include "tkip.h"
29 #include "wme.h"
30
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb)
39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else {
44 /* driver bug */
45 WARN_ON(1);
46 dev_kfree_skb(skb);
47 skb = NULL;
48 }
49 }
50
51 return skb;
52 }
53
54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len)
56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1;
68 return 0;
69 }
70
71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status)
74 {
75 int len;
76
77 /* always present fields */
78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79
80 if (status->flag & RX_FLAG_MACTIME_MPDU)
81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1;
84
85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++;
87
88 if (status->flag & RX_FLAG_HT) /* HT info */
89 len += 3;
90
91 return len;
92 }
93
94 /*
95 * ieee80211_add_rx_radiotap_header - add radiotap header
96 *
97 * add a radiotap header containing all the fields which the hardware provided.
98 */
99 static void
100 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
101 struct sk_buff *skb,
102 struct ieee80211_rate *rate,
103 int rtap_len)
104 {
105 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
106 struct ieee80211_radiotap_header *rthdr;
107 unsigned char *pos;
108 u16 rx_flags = 0;
109
110 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
111 memset(rthdr, 0, rtap_len);
112
113 /* radiotap header, set always present flags */
114 rthdr->it_present =
115 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
116 (1 << IEEE80211_RADIOTAP_CHANNEL) |
117 (1 << IEEE80211_RADIOTAP_ANTENNA) |
118 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
119 rthdr->it_len = cpu_to_le16(rtap_len);
120
121 pos = (unsigned char *)(rthdr+1);
122
123 /* the order of the following fields is important */
124
125 /* IEEE80211_RADIOTAP_TSFT */
126 if (status->flag & RX_FLAG_MACTIME_MPDU) {
127 put_unaligned_le64(status->mactime, pos);
128 rthdr->it_present |=
129 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
130 pos += 8;
131 }
132
133 /* IEEE80211_RADIOTAP_FLAGS */
134 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
135 *pos |= IEEE80211_RADIOTAP_F_FCS;
136 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
137 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
138 if (status->flag & RX_FLAG_SHORTPRE)
139 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
140 pos++;
141
142 /* IEEE80211_RADIOTAP_RATE */
143 if (status->flag & RX_FLAG_HT) {
144 /*
145 * MCS information is a separate field in radiotap,
146 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0.
148 */
149 *pos = 0;
150 } else {
151 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
152 *pos = rate->bitrate / 5;
153 }
154 pos++;
155
156 /* IEEE80211_RADIOTAP_CHANNEL */
157 put_unaligned_le16(status->freq, pos);
158 pos += 2;
159 if (status->band == IEEE80211_BAND_5GHZ)
160 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
161 pos);
162 else if (status->flag & RX_FLAG_HT)
163 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
164 pos);
165 else if (rate->flags & IEEE80211_RATE_ERP_G)
166 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
167 pos);
168 else
169 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
170 pos);
171 pos += 2;
172
173 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
174 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
175 *pos = status->signal;
176 rthdr->it_present |=
177 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
178 pos++;
179 }
180
181 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
182
183 /* IEEE80211_RADIOTAP_ANTENNA */
184 *pos = status->antenna;
185 pos++;
186
187 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
188
189 /* IEEE80211_RADIOTAP_RX_FLAGS */
190 /* ensure 2 byte alignment for the 2 byte field as required */
191 if ((pos - (u8 *)rthdr) & 1)
192 pos++;
193 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
194 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
195 put_unaligned_le16(rx_flags, pos);
196 pos += 2;
197
198 if (status->flag & RX_FLAG_HT) {
199 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
200 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
201 IEEE80211_RADIOTAP_MCS_HAVE_GI |
202 IEEE80211_RADIOTAP_MCS_HAVE_BW;
203 *pos = 0;
204 if (status->flag & RX_FLAG_SHORT_GI)
205 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
206 if (status->flag & RX_FLAG_40MHZ)
207 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
208 pos++;
209 *pos++ = status->rate_idx;
210 }
211 }
212
213 /*
214 * This function copies a received frame to all monitor interfaces and
215 * returns a cleaned-up SKB that no longer includes the FCS nor the
216 * radiotap header the driver might have added.
217 */
218 static struct sk_buff *
219 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
220 struct ieee80211_rate *rate)
221 {
222 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
223 struct ieee80211_sub_if_data *sdata;
224 int needed_headroom = 0;
225 struct sk_buff *skb, *skb2;
226 struct net_device *prev_dev = NULL;
227 int present_fcs_len = 0;
228
229 /*
230 * First, we may need to make a copy of the skb because
231 * (1) we need to modify it for radiotap (if not present), and
232 * (2) the other RX handlers will modify the skb we got.
233 *
234 * We don't need to, of course, if we aren't going to return
235 * the SKB because it has a bad FCS/PLCP checksum.
236 */
237
238 /* room for the radiotap header based on driver features */
239 needed_headroom = ieee80211_rx_radiotap_len(local, status);
240
241 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
242 present_fcs_len = FCS_LEN;
243
244 /* make sure hdr->frame_control is on the linear part */
245 if (!pskb_may_pull(origskb, 2)) {
246 dev_kfree_skb(origskb);
247 return NULL;
248 }
249
250 if (!local->monitors) {
251 if (should_drop_frame(origskb, present_fcs_len)) {
252 dev_kfree_skb(origskb);
253 return NULL;
254 }
255
256 return remove_monitor_info(local, origskb);
257 }
258
259 if (should_drop_frame(origskb, present_fcs_len)) {
260 /* only need to expand headroom if necessary */
261 skb = origskb;
262 origskb = NULL;
263
264 /*
265 * This shouldn't trigger often because most devices have an
266 * RX header they pull before we get here, and that should
267 * be big enough for our radiotap information. We should
268 * probably export the length to drivers so that we can have
269 * them allocate enough headroom to start with.
270 */
271 if (skb_headroom(skb) < needed_headroom &&
272 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
273 dev_kfree_skb(skb);
274 return NULL;
275 }
276 } else {
277 /*
278 * Need to make a copy and possibly remove radiotap header
279 * and FCS from the original.
280 */
281 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
282
283 origskb = remove_monitor_info(local, origskb);
284
285 if (!skb)
286 return origskb;
287 }
288
289 /* prepend radiotap information */
290 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
291
292 skb_reset_mac_header(skb);
293 skb->ip_summed = CHECKSUM_UNNECESSARY;
294 skb->pkt_type = PACKET_OTHERHOST;
295 skb->protocol = htons(ETH_P_802_2);
296
297 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
298 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
299 continue;
300
301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
302 continue;
303
304 if (!ieee80211_sdata_running(sdata))
305 continue;
306
307 if (prev_dev) {
308 skb2 = skb_clone(skb, GFP_ATOMIC);
309 if (skb2) {
310 skb2->dev = prev_dev;
311 netif_receive_skb(skb2);
312 }
313 }
314
315 prev_dev = sdata->dev;
316 sdata->dev->stats.rx_packets++;
317 sdata->dev->stats.rx_bytes += skb->len;
318 }
319
320 if (prev_dev) {
321 skb->dev = prev_dev;
322 netif_receive_skb(skb);
323 } else
324 dev_kfree_skb(skb);
325
326 return origskb;
327 }
328
329
330 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
331 {
332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
333 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
334 int tid, seqno_idx, security_idx;
335
336 /* does the frame have a qos control field? */
337 if (ieee80211_is_data_qos(hdr->frame_control)) {
338 u8 *qc = ieee80211_get_qos_ctl(hdr);
339 /* frame has qos control */
340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
341 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
342 status->rx_flags |= IEEE80211_RX_AMSDU;
343
344 seqno_idx = tid;
345 security_idx = tid;
346 } else {
347 /*
348 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
349 *
350 * Sequence numbers for management frames, QoS data
351 * frames with a broadcast/multicast address in the
352 * Address 1 field, and all non-QoS data frames sent
353 * by QoS STAs are assigned using an additional single
354 * modulo-4096 counter, [...]
355 *
356 * We also use that counter for non-QoS STAs.
357 */
358 seqno_idx = NUM_RX_DATA_QUEUES;
359 security_idx = 0;
360 if (ieee80211_is_mgmt(hdr->frame_control))
361 security_idx = NUM_RX_DATA_QUEUES;
362 tid = 0;
363 }
364
365 rx->seqno_idx = seqno_idx;
366 rx->security_idx = security_idx;
367 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
368 * For now, set skb->priority to 0 for other cases. */
369 rx->skb->priority = (tid > 7) ? 0 : tid;
370 }
371
372 /**
373 * DOC: Packet alignment
374 *
375 * Drivers always need to pass packets that are aligned to two-byte boundaries
376 * to the stack.
377 *
 378  * Additionally, drivers should, if possible, align the payload data in a way that
379 * guarantees that the contained IP header is aligned to a four-byte
380 * boundary. In the case of regular frames, this simply means aligning the
381 * payload to a four-byte boundary (because either the IP header is directly
382 * contained, or IV/RFC1042 headers that have a length divisible by four are
383 * in front of it). If the payload data is not properly aligned and the
384 * architecture doesn't support efficient unaligned operations, mac80211
385 * will align the data.
386 *
387 * With A-MSDU frames, however, the payload data address must yield two modulo
388 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
389 * push the IP header further back to a multiple of four again. Thankfully, the
390 * specs were sane enough this time around to require padding each A-MSDU
391 * subframe to a length that is a multiple of four.
392 *
 393  * Padding such as that added by Atheros hardware between the 802.11 header
 394  * and the payload is not supported; the driver is required to move the
 395  * 802.11 header so that it sits directly in front of the payload in that case.
396 */
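/*
 * Illustrative sketch, not part of the original file: one way a driver
 * could satisfy the two-byte alignment rule described above is to shift
 * a frame that was received at an odd address before passing it to
 * mac80211, mirroring the memmove pattern mac80211 itself uses later in
 * ieee80211_deliver_skb().  The helper name is hypothetical and it
 * assumes the driver reserved at least one byte of headroom.
 */
static void __maybe_unused example_driver_align_rx_skb(struct sk_buff *skb)
{
	u8 *data = skb->data;
	size_t len = skb_headlen(skb);

	if (!((unsigned long)data & 1))
		return;		/* already aligned to a two-byte boundary */

	if (WARN_ON(skb_headroom(skb) < 1))
		return;		/* nowhere to shift the frame to */

	/* move the whole linear part one byte towards the headroom */
	skb->data -= 1;
	memmove(skb->data, data, len);
	skb_set_tail_pointer(skb, len);
}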
397 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
398 {
399 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
400 WARN_ONCE((unsigned long)rx->skb->data & 1,
401 "unaligned packet at 0x%p\n", rx->skb->data);
402 #endif
403 }
404
405
406 /* rx handlers */
407
408 static ieee80211_rx_result debug_noinline
409 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
410 {
411 struct ieee80211_local *local = rx->local;
412 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
413 struct sk_buff *skb = rx->skb;
414
415 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
416 !local->sched_scanning))
417 return RX_CONTINUE;
418
419 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
420 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
421 local->sched_scanning)
422 return ieee80211_scan_rx(rx->sdata, skb);
423
 424  /* scanning finished while the rx handlers were running */
425 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
426 return RX_DROP_UNUSABLE;
427 }
428
429
430 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
431 {
432 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
433
434 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
435 return 0;
436
437 return ieee80211_is_robust_mgmt_frame(hdr);
438 }
439
440
441 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
442 {
443 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
444
445 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
446 return 0;
447
448 return ieee80211_is_robust_mgmt_frame(hdr);
449 }
450
451
452 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
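/*
 * Annotation added for clarity (not in the original file): per IEEE
 * 802.11w the MMIE sits at the very end of the protected management
 * frame and is laid out as Element ID (1 octet, WLAN_EID_MMIE),
 * Length (1 octet, value 16), Key ID (2 octets), IPN (6 octets) and
 * MIC (8 octets), 18 octets in total, which is why the code below
 * checks the frame length against 24 + sizeof(*mmie) and the element
 * length against sizeof(*mmie) - 2.
 */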
453 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
454 {
455 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
456 struct ieee80211_mmie *mmie;
457
458 if (skb->len < 24 + sizeof(*mmie) ||
459 !is_multicast_ether_addr(hdr->da))
460 return -1;
461
462 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
463 return -1; /* not a robust management frame */
464
465 mmie = (struct ieee80211_mmie *)
466 (skb->data + skb->len - sizeof(*mmie));
467 if (mmie->element_id != WLAN_EID_MMIE ||
468 mmie->length != sizeof(*mmie) - 2)
469 return -1;
470
471 return le16_to_cpu(mmie->key_id);
472 }
473
474
475 static ieee80211_rx_result
476 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
477 {
478 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
479 char *dev_addr = rx->sdata->vif.addr;
480
481 if (ieee80211_is_data(hdr->frame_control)) {
482 if (is_multicast_ether_addr(hdr->addr1)) {
483 if (ieee80211_has_tods(hdr->frame_control) ||
484 !ieee80211_has_fromds(hdr->frame_control))
485 return RX_DROP_MONITOR;
486 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
487 return RX_DROP_MONITOR;
488 } else {
489 if (!ieee80211_has_a4(hdr->frame_control))
490 return RX_DROP_MONITOR;
491 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
492 return RX_DROP_MONITOR;
493 }
494 }
495
496 /* If there is not an established peer link and this is not a peer link
 497  * establishment frame, beacon or probe, drop the frame.
498 */
499
500 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
501 struct ieee80211_mgmt *mgmt;
502
503 if (!ieee80211_is_mgmt(hdr->frame_control))
504 return RX_DROP_MONITOR;
505
506 if (ieee80211_is_action(hdr->frame_control)) {
507 u8 category;
508 mgmt = (struct ieee80211_mgmt *)hdr;
509 category = mgmt->u.action.category;
510 if (category != WLAN_CATEGORY_MESH_ACTION &&
511 category != WLAN_CATEGORY_SELF_PROTECTED)
512 return RX_DROP_MONITOR;
513 return RX_CONTINUE;
514 }
515
516 if (ieee80211_is_probe_req(hdr->frame_control) ||
517 ieee80211_is_probe_resp(hdr->frame_control) ||
518 ieee80211_is_beacon(hdr->frame_control) ||
519 ieee80211_is_auth(hdr->frame_control))
520 return RX_CONTINUE;
521
522 return RX_DROP_MONITOR;
523
524 }
525
526 return RX_CONTINUE;
527 }
528
529 #define SEQ_MODULO 0x1000
530 #define SEQ_MASK 0xfff
531
532 static inline int seq_less(u16 sq1, u16 sq2)
533 {
534 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
535 }
536
537 static inline u16 seq_inc(u16 sq)
538 {
539 return (sq + 1) & SEQ_MASK;
540 }
541
542 static inline u16 seq_sub(u16 sq1, u16 sq2)
543 {
544 return (sq1 - sq2) & SEQ_MASK;
545 }
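/*
 * Worked example for the helpers above (annotation only, not from the
 * original file): the comparison treats a difference of more than half
 * the 4096-entry sequence space as "behind".  With head_seq_num = 4090
 * and an incoming mpdu_seq_num = 3:
 *
 *   seq_sub(3, 4090)  = (3 - 4090) & 0xfff = 9     -> 3 is 9 ahead
 *   seq_less(3, 4090) = (9 > 2048)   = false       -> 3 is not older
 *   seq_less(4090, 3) = (4087 > 2048) = true       -> 4090 is older
 *
 * so the reorder code keeps working across the 4095 -> 0 wrap.
 */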
546
547
548 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
549 struct tid_ampdu_rx *tid_agg_rx,
550 int index)
551 {
552 struct ieee80211_local *local = hw_to_local(hw);
553 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
554 struct ieee80211_rx_status *status;
555
556 lockdep_assert_held(&tid_agg_rx->reorder_lock);
557
558 if (!skb)
559 goto no_frame;
560
561 /* release the frame from the reorder ring buffer */
562 tid_agg_rx->stored_mpdu_num--;
563 tid_agg_rx->reorder_buf[index] = NULL;
564 status = IEEE80211_SKB_RXCB(skb);
565 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
566 skb_queue_tail(&local->rx_skb_queue, skb);
567
568 no_frame:
569 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
570 }
571
572 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
573 struct tid_ampdu_rx *tid_agg_rx,
574 u16 head_seq_num)
575 {
576 int index;
577
578 lockdep_assert_held(&tid_agg_rx->reorder_lock);
579
580 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
581 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
582 tid_agg_rx->buf_size;
583 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
584 }
585 }
586
587 /*
588 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
589 * the skb was added to the buffer longer than this time ago, the earlier
590 * frames that have not yet been received are assumed to be lost and the skb
591 * can be released for processing. This may also release other skb's from the
592 * reorder buffer if there are no additional gaps between the frames.
593 *
594 * Callers must hold tid_agg_rx->reorder_lock.
595 */
596 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
597
598 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
599 struct tid_ampdu_rx *tid_agg_rx)
600 {
601 int index, j;
602
603 lockdep_assert_held(&tid_agg_rx->reorder_lock);
604
605 /* release the buffer until next missing frame */
606 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
607 tid_agg_rx->buf_size;
608 if (!tid_agg_rx->reorder_buf[index] &&
609 tid_agg_rx->stored_mpdu_num > 1) {
610 /*
611 * No buffers ready to be released, but check whether any
612 * frames in the reorder buffer have timed out.
613 */
614 int skipped = 1;
615 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
616 j = (j + 1) % tid_agg_rx->buf_size) {
617 if (!tid_agg_rx->reorder_buf[j]) {
618 skipped++;
619 continue;
620 }
621 if (skipped &&
622 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
623 HT_RX_REORDER_BUF_TIMEOUT))
624 goto set_release_timer;
625
626 #ifdef CONFIG_MAC80211_HT_DEBUG
627 if (net_ratelimit())
628 wiphy_debug(hw->wiphy,
629 "release an RX reorder frame due to timeout on earlier frames\n");
630 #endif
631 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
632
633 /*
634 * Increment the head seq# also for the skipped slots.
635 */
636 tid_agg_rx->head_seq_num =
637 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
638 skipped = 0;
639 }
640 } else while (tid_agg_rx->reorder_buf[index]) {
641 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
642 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
643 tid_agg_rx->buf_size;
644 }
645
646 if (tid_agg_rx->stored_mpdu_num) {
647 j = index = seq_sub(tid_agg_rx->head_seq_num,
648 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
649
650 for (; j != (index - 1) % tid_agg_rx->buf_size;
651 j = (j + 1) % tid_agg_rx->buf_size) {
652 if (tid_agg_rx->reorder_buf[j])
653 break;
654 }
655
656 set_release_timer:
657
658 mod_timer(&tid_agg_rx->reorder_timer,
659 tid_agg_rx->reorder_time[j] + 1 +
660 HT_RX_REORDER_BUF_TIMEOUT);
661 } else {
662 del_timer(&tid_agg_rx->reorder_timer);
663 }
664 }
665
666 /*
667 * As this function belongs to the RX path it must be under
668 * rcu_read_lock protection. It returns false if the frame
669 * can be processed immediately, true if it was consumed.
670 */
671 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
672 struct tid_ampdu_rx *tid_agg_rx,
673 struct sk_buff *skb)
674 {
675 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
676 u16 sc = le16_to_cpu(hdr->seq_ctrl);
677 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
678 u16 head_seq_num, buf_size;
679 int index;
680 bool ret = true;
681
682 spin_lock(&tid_agg_rx->reorder_lock);
683
684 buf_size = tid_agg_rx->buf_size;
685 head_seq_num = tid_agg_rx->head_seq_num;
686
687 /* frame with out of date sequence number */
688 if (seq_less(mpdu_seq_num, head_seq_num)) {
689 dev_kfree_skb(skb);
690 goto out;
691 }
692
693 /*
 694  * If the frame's sequence number exceeds our buffering window
 695  * size, release some previous frames to make room for this one.
696 */
697 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
698 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
699 /* release stored frames up to new head to stack */
700 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
701 }
702
703 /* Now the new frame is always in the range of the reordering buffer */
704
705 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
706
707 /* check if we already stored this frame */
708 if (tid_agg_rx->reorder_buf[index]) {
709 dev_kfree_skb(skb);
710 goto out;
711 }
712
713 /*
714 * If the current MPDU is in the right order and nothing else
715 * is stored we can process it directly, no need to buffer it.
716 * If it is first but there's something stored, we may be able
717 * to release frames after this one.
718 */
719 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
720 tid_agg_rx->stored_mpdu_num == 0) {
721 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
722 ret = false;
723 goto out;
724 }
725
726 /* put the frame in the reordering buffer */
727 tid_agg_rx->reorder_buf[index] = skb;
728 tid_agg_rx->reorder_time[index] = jiffies;
729 tid_agg_rx->stored_mpdu_num++;
730 ieee80211_sta_reorder_release(hw, tid_agg_rx);
731
732 out:
733 spin_unlock(&tid_agg_rx->reorder_lock);
734 return ret;
735 }
736
737 /*
 738  * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
 739  * need no reordering are queued on the local rx_skb_queue for immediate processing.
740 */
741 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
742 {
743 struct sk_buff *skb = rx->skb;
744 struct ieee80211_local *local = rx->local;
745 struct ieee80211_hw *hw = &local->hw;
746 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
747 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
748 struct sta_info *sta = rx->sta;
749 struct tid_ampdu_rx *tid_agg_rx;
750 u16 sc;
751 u8 tid, ack_policy;
752
753 if (!ieee80211_is_data_qos(hdr->frame_control))
754 goto dont_reorder;
755
756 /*
757 * filter the QoS data rx stream according to
758 * STA/TID and check if this STA/TID is on aggregation
759 */
760
761 if (!sta)
762 goto dont_reorder;
763
764 ack_policy = *ieee80211_get_qos_ctl(hdr) &
765 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
766 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
767
768 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
769 if (!tid_agg_rx)
770 goto dont_reorder;
771
772 /* qos null data frames are excluded */
773 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
774 goto dont_reorder;
775
776 /* not part of a BA session */
777 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
778 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
779 goto dont_reorder;
780
781 /* not actually part of this BA session */
782 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
783 goto dont_reorder;
784
785 /* new, potentially un-ordered, ampdu frame - process it */
786
787 /* reset session timer */
788 if (tid_agg_rx->timeout)
789 mod_timer(&tid_agg_rx->session_timer,
790 TU_TO_EXP_TIME(tid_agg_rx->timeout));
791
792 /* if this mpdu is fragmented - terminate rx aggregation session */
793 sc = le16_to_cpu(hdr->seq_ctrl);
794 if (sc & IEEE80211_SCTL_FRAG) {
795 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
796 skb_queue_tail(&rx->sdata->skb_queue, skb);
797 ieee80211_queue_work(&local->hw, &rx->sdata->work);
798 return;
799 }
800
801 /*
802 * No locking needed -- we will only ever process one
803 * RX packet at a time, and thus own tid_agg_rx. All
804 * other code manipulating it needs to (and does) make
805 * sure that we cannot get to it any more before doing
806 * anything with it.
807 */
808 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
809 return;
810
811 dont_reorder:
812 skb_queue_tail(&local->rx_skb_queue, skb);
813 }
814
815 static ieee80211_rx_result debug_noinline
816 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
817 {
818 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
819 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
820
821 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
822 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
823 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
824 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
825 hdr->seq_ctrl)) {
826 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
827 rx->local->dot11FrameDuplicateCount++;
828 rx->sta->num_duplicates++;
829 }
830 return RX_DROP_UNUSABLE;
831 } else
832 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
833 }
834
835 if (unlikely(rx->skb->len < 16)) {
836 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
837 return RX_DROP_MONITOR;
838 }
839
840 /* Drop disallowed frame classes based on STA auth/assoc state;
841 * IEEE 802.11, Chap 5.5.
842 *
843 * mac80211 filters only based on association state, i.e. it drops
844 * Class 3 frames from not associated stations. hostapd sends
845 * deauth/disassoc frames when needed. In addition, hostapd is
846 * responsible for filtering on both auth and assoc states.
847 */
848
849 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
850 return ieee80211_rx_mesh_check(rx);
851
852 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
853 ieee80211_is_pspoll(hdr->frame_control)) &&
854 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
855 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
856 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
857 if (rx->sta && rx->sta->dummy &&
858 ieee80211_is_data_present(hdr->frame_control)) {
859 u16 ethertype;
860 u8 *payload;
861
862 payload = rx->skb->data +
863 ieee80211_hdrlen(hdr->frame_control);
864 ethertype = (payload[6] << 8) | payload[7];
865 if (cpu_to_be16(ethertype) ==
866 rx->sdata->control_port_protocol)
867 return RX_CONTINUE;
868 }
869
870 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
871 cfg80211_rx_spurious_frame(rx->sdata->dev,
872 hdr->addr2,
873 GFP_ATOMIC))
874 return RX_DROP_UNUSABLE;
875
876 return RX_DROP_MONITOR;
877 }
878
879 return RX_CONTINUE;
880 }
881
882
883 static ieee80211_rx_result debug_noinline
884 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
885 {
886 struct sk_buff *skb = rx->skb;
887 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
888 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
889 int keyidx;
890 int hdrlen;
891 ieee80211_rx_result result = RX_DROP_UNUSABLE;
892 struct ieee80211_key *sta_ptk = NULL;
893 int mmie_keyidx = -1;
894 __le16 fc;
895
896 /*
897 * Key selection 101
898 *
899 * There are four types of keys:
900 * - GTK (group keys)
901 * - IGTK (group keys for management frames)
902 * - PTK (pairwise keys)
903 * - STK (station-to-station pairwise keys)
904 *
905 * When selecting a key, we have to distinguish between multicast
906 * (including broadcast) and unicast frames, the latter can only
907 * use PTKs and STKs while the former always use GTKs and IGTKs.
908 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
909 * unicast frames can also use key indices like GTKs. Hence, if we
910 * don't have a PTK/STK we check the key index for a WEP key.
911 *
912 * Note that in a regular BSS, multicast frames are sent by the
913 * AP only, associated stations unicast the frame to the AP first
914 * which then multicasts it on their behalf.
915 *
916 * There is also a slight problem in IBSS mode: GTKs are negotiated
917 * with each station, that is something we don't currently handle.
918 * The spec seems to expect that one negotiates the same key with
919 * every station but there's no such requirement; VLANs could be
920 * possible.
921 */
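	/*
	 * Summary of the selection that follows (annotation added for
	 * clarity, not in the original file):
	 *   unicast and a PTK is known   -> use the PTK
	 *   MMIE present (BIP)           -> IGTK chosen by the MMIE key index
	 *   frame not protected          -> note a plausible key, continue
	 *   otherwise                    -> key index taken from the IV byte
	 */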
922
923 /*
924 * No point in finding a key and decrypting if the frame is neither
925 * addressed to us nor a multicast frame.
926 */
927 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
928 return RX_CONTINUE;
929
930 /* start without a key */
931 rx->key = NULL;
932
933 if (rx->sta)
934 sta_ptk = rcu_dereference(rx->sta->ptk);
935
936 fc = hdr->frame_control;
937
938 if (!ieee80211_has_protected(fc))
939 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
940
941 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
942 rx->key = sta_ptk;
943 if ((status->flag & RX_FLAG_DECRYPTED) &&
944 (status->flag & RX_FLAG_IV_STRIPPED))
945 return RX_CONTINUE;
946 /* Skip decryption if the frame is not protected. */
947 if (!ieee80211_has_protected(fc))
948 return RX_CONTINUE;
949 } else if (mmie_keyidx >= 0) {
950 /* Broadcast/multicast robust management frame / BIP */
951 if ((status->flag & RX_FLAG_DECRYPTED) &&
952 (status->flag & RX_FLAG_IV_STRIPPED))
953 return RX_CONTINUE;
954
955 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
956 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
957 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
958 if (rx->sta)
959 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
960 if (!rx->key)
961 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
962 } else if (!ieee80211_has_protected(fc)) {
963 /*
964 * The frame was not protected, so skip decryption. However, we
965 * need to set rx->key if there is a key that could have been
966 * used so that the frame may be dropped if encryption would
967 * have been expected.
968 */
969 struct ieee80211_key *key = NULL;
970 struct ieee80211_sub_if_data *sdata = rx->sdata;
971 int i;
972
973 if (ieee80211_is_mgmt(fc) &&
974 is_multicast_ether_addr(hdr->addr1) &&
975 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
976 rx->key = key;
977 else {
978 if (rx->sta) {
979 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
980 key = rcu_dereference(rx->sta->gtk[i]);
981 if (key)
982 break;
983 }
984 }
985 if (!key) {
986 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
987 key = rcu_dereference(sdata->keys[i]);
988 if (key)
989 break;
990 }
991 }
992 if (key)
993 rx->key = key;
994 }
995 return RX_CONTINUE;
996 } else {
997 u8 keyid;
998 /*
999 * The device doesn't give us the IV so we won't be
1000 * able to look up the key. That's ok though, we
1001 * don't need to decrypt the frame, we just won't
1002 * be able to keep statistics accurate.
1003 * Except for key threshold notifications, should
1004 * we somehow allow the driver to tell us which key
1005 * the hardware used if this flag is set?
1006 */
1007 if ((status->flag & RX_FLAG_DECRYPTED) &&
1008 (status->flag & RX_FLAG_IV_STRIPPED))
1009 return RX_CONTINUE;
1010
1011 hdrlen = ieee80211_hdrlen(fc);
1012
1013 if (rx->skb->len < 8 + hdrlen)
1014 return RX_DROP_UNUSABLE; /* TODO: count this? */
1015
1016 /*
1017 * no need to call ieee80211_wep_get_keyidx,
1018 * it verifies a bunch of things we've done already
1019 */
1020 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1021 keyidx = keyid >> 6;
1022
1023 /* check per-station GTK first, if multicast packet */
1024 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1025 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1026
1027 /* if not found, try default key */
1028 if (!rx->key) {
1029 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1030
1031 /*
1032 * RSNA-protected unicast frames should always be
1033 * sent with pairwise or station-to-station keys,
1034 * but for WEP we allow using a key index as well.
1035 */
1036 if (rx->key &&
1037 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1038 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1039 !is_multicast_ether_addr(hdr->addr1))
1040 rx->key = NULL;
1041 }
1042 }
1043
1044 if (rx->key) {
1045 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1046 return RX_DROP_MONITOR;
1047
1048 rx->key->tx_rx_count++;
1049 /* TODO: add threshold stuff again */
1050 } else {
1051 return RX_DROP_MONITOR;
1052 }
1053
1054 if (skb_linearize(rx->skb))
1055 return RX_DROP_UNUSABLE;
1056 /* the hdr variable is invalid now! */
1057
1058 switch (rx->key->conf.cipher) {
1059 case WLAN_CIPHER_SUITE_WEP40:
1060 case WLAN_CIPHER_SUITE_WEP104:
1061 /* Check for weak IVs if possible */
1062 if (rx->sta && ieee80211_is_data(fc) &&
1063 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1064 !(status->flag & RX_FLAG_DECRYPTED)) &&
1065 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1066 rx->sta->wep_weak_iv_count++;
1067
1068 result = ieee80211_crypto_wep_decrypt(rx);
1069 break;
1070 case WLAN_CIPHER_SUITE_TKIP:
1071 result = ieee80211_crypto_tkip_decrypt(rx);
1072 break;
1073 case WLAN_CIPHER_SUITE_CCMP:
1074 result = ieee80211_crypto_ccmp_decrypt(rx);
1075 break;
1076 case WLAN_CIPHER_SUITE_AES_CMAC:
1077 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1078 break;
1079 default:
1080 /*
1081 * We can reach here only with HW-only algorithms
1082 * but why didn't it decrypt the frame?!
1083 */
1084 return RX_DROP_UNUSABLE;
1085 }
1086
1087 /* either the frame has been decrypted or will be dropped */
1088 status->flag |= RX_FLAG_DECRYPTED;
1089
1090 return result;
1091 }
1092
1093 static ieee80211_rx_result debug_noinline
1094 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1095 {
1096 struct ieee80211_local *local;
1097 struct ieee80211_hdr *hdr;
1098 struct sk_buff *skb;
1099
1100 local = rx->local;
1101 skb = rx->skb;
1102 hdr = (struct ieee80211_hdr *) skb->data;
1103
1104 if (!local->pspolling)
1105 return RX_CONTINUE;
1106
1107 if (!ieee80211_has_fromds(hdr->frame_control))
1108 /* this is not from AP */
1109 return RX_CONTINUE;
1110
1111 if (!ieee80211_is_data(hdr->frame_control))
1112 return RX_CONTINUE;
1113
1114 if (!ieee80211_has_moredata(hdr->frame_control)) {
1115 /* AP has no more frames buffered for us */
1116 local->pspolling = false;
1117 return RX_CONTINUE;
1118 }
1119
1120 /* more data bit is set, let's request a new frame from the AP */
1121 ieee80211_send_pspoll(local, rx->sdata);
1122
1123 return RX_CONTINUE;
1124 }
1125
1126 static void ap_sta_ps_start(struct sta_info *sta)
1127 {
1128 struct ieee80211_sub_if_data *sdata = sta->sdata;
1129 struct ieee80211_local *local = sdata->local;
1130
1131 atomic_inc(&sdata->bss->num_sta_ps);
1132 set_sta_flag(sta, WLAN_STA_PS_STA);
1133 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1134 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1135 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1136 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1137 sdata->name, sta->sta.addr, sta->sta.aid);
1138 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1139 }
1140
1141 static void ap_sta_ps_end(struct sta_info *sta)
1142 {
1143 struct ieee80211_sub_if_data *sdata = sta->sdata;
1144
1145 atomic_dec(&sdata->bss->num_sta_ps);
1146
1147 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1148 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1149 sdata->name, sta->sta.addr, sta->sta.aid);
1150 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1151
1152 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1153 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1154 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1155 sdata->name, sta->sta.addr, sta->sta.aid);
1156 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1157 return;
1158 }
1159
1160 ieee80211_sta_ps_deliver_wakeup(sta);
1161 }
1162
1163 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1164 {
1165 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1166 bool in_ps;
1167
1168 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1169
1170 /* Don't let the same PS state be set twice */
1171 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1172 if ((start && in_ps) || (!start && !in_ps))
1173 return -EINVAL;
1174
1175 if (start)
1176 ap_sta_ps_start(sta_inf);
1177 else
1178 ap_sta_ps_end(sta_inf);
1179
1180 return 0;
1181 }
1182 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
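/*
 * Hedged usage sketch (not part of the original file): a driver that
 * advertises IEEE80211_HW_AP_LINK_PS and learns about powersave
 * transitions from its firmware would live in its own module and might
 * report them roughly as below.  The wrapper name is hypothetical; only
 * ieee80211_sta_ps_transition() above is real mac80211 API.
 */
static void __maybe_unused example_driver_report_ps(struct ieee80211_sta *sta,
						    bool asleep)
{
	/* returns -EINVAL if the station is already in the given state */
	if (ieee80211_sta_ps_transition(sta, asleep))
		pr_debug("PS transition for %pM ignored, no state change\n",
			 sta->addr);
}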
1183
1184 static ieee80211_rx_result debug_noinline
1185 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1186 {
1187 struct ieee80211_sub_if_data *sdata = rx->sdata;
1188 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1189 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1190 int tid, ac;
1191
1192 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1193 return RX_CONTINUE;
1194
1195 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1196 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1197 return RX_CONTINUE;
1198
1199 /*
1200 * The device handles station powersave, so don't do anything about
 1201  * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
 1202  * to mac80211, since the device already handles them.)
1203 */
1204 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1205 return RX_CONTINUE;
1206
1207 /*
1208 * Don't do anything if the station isn't already asleep. In
1209 * the uAPSD case, the station will probably be marked asleep,
1210 * in the PS-Poll case the station must be confused ...
1211 */
1212 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1213 return RX_CONTINUE;
1214
1215 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1216 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1217 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1218 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1219 else
1220 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1221 }
1222
 1223  /* Free the PS-Poll skb here instead of returning RX_DROP, which
 1224  * would count as a dropped frame. */
1225 dev_kfree_skb(rx->skb);
1226
1227 return RX_QUEUED;
1228 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1229 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1230 ieee80211_has_pm(hdr->frame_control) &&
1231 (ieee80211_is_data_qos(hdr->frame_control) ||
1232 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1233 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1234 ac = ieee802_1d_to_ac[tid & 7];
1235
1236 /*
1237 * If this AC is not trigger-enabled do nothing.
1238 *
1239 * NB: This could/should check a separate bitmap of trigger-
1240 * enabled queues, but for now we only implement uAPSD w/o
1241 * TSPEC changes to the ACs, so they're always the same.
1242 */
1243 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1244 return RX_CONTINUE;
1245
1246 /* if we are in a service period, do nothing */
1247 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1248 return RX_CONTINUE;
1249
1250 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1251 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1252 else
1253 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1254 }
1255
1256 return RX_CONTINUE;
1257 }
1258
1259 static ieee80211_rx_result debug_noinline
1260 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1261 {
1262 struct sta_info *sta = rx->sta;
1263 struct sk_buff *skb = rx->skb;
1264 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1265 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1266
1267 if (!sta)
1268 return RX_CONTINUE;
1269
1270 /*
1271 * Update last_rx only for IBSS packets which are for the current
1272 * BSSID to avoid keeping the current IBSS network alive in cases
1273 * where other STAs start using different BSSID.
1274 */
1275 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1276 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1277 NL80211_IFTYPE_ADHOC);
1278 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1279 sta->last_rx = jiffies;
1280 if (ieee80211_is_data(hdr->frame_control)) {
1281 sta->last_rx_rate_idx = status->rate_idx;
1282 sta->last_rx_rate_flag = status->flag;
1283 }
1284 }
1285 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1286 /*
 1287  * Mesh beacons will update last_rx if they are found to
1288 * match the current local configuration when processed.
1289 */
1290 sta->last_rx = jiffies;
1291 if (ieee80211_is_data(hdr->frame_control)) {
1292 sta->last_rx_rate_idx = status->rate_idx;
1293 sta->last_rx_rate_flag = status->flag;
1294 }
1295 }
1296
1297 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1298 return RX_CONTINUE;
1299
1300 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1301 ieee80211_sta_rx_notify(rx->sdata, hdr);
1302
1303 sta->rx_fragments++;
1304 sta->rx_bytes += rx->skb->len;
1305 sta->last_signal = status->signal;
1306 ewma_add(&sta->avg_signal, -status->signal);
1307
1308 /*
1309 * Change STA power saving mode only at the end of a frame
1310 * exchange sequence.
1311 */
1312 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1313 !ieee80211_has_morefrags(hdr->frame_control) &&
1314 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1315 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1316 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1317 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1318 /*
1319 * Ignore doze->wake transitions that are
1320 * indicated by non-data frames, the standard
1321 * is unclear here, but for example going to
1322 * PS mode and then scanning would cause a
1323 * doze->wake transition for the probe request,
1324 * and that is clearly undesirable.
1325 */
1326 if (ieee80211_is_data(hdr->frame_control) &&
1327 !ieee80211_has_pm(hdr->frame_control))
1328 ap_sta_ps_end(sta);
1329 } else {
1330 if (ieee80211_has_pm(hdr->frame_control))
1331 ap_sta_ps_start(sta);
1332 }
1333 }
1334
1335 /*
1336 * Drop (qos-)data::nullfunc frames silently, since they
1337 * are used only to control station power saving mode.
1338 */
1339 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1340 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1341 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1342
1343 /*
1344 * If we receive a 4-addr nullfunc frame from a STA
1345 * that was not moved to a 4-addr STA vlan yet, drop
1346 * the frame to the monitor interface, to make sure
1347 * that hostapd sees it
1348 */
1349 if (ieee80211_has_a4(hdr->frame_control) &&
1350 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1351 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1352 !rx->sdata->u.vlan.sta)))
1353 return RX_DROP_MONITOR;
1354 /*
1355 * Update counter and free packet here to avoid
 1356  * counting this as a dropped packet.
1357 */
1358 sta->rx_packets++;
1359 dev_kfree_skb(rx->skb);
1360 return RX_QUEUED;
1361 }
1362
1363 return RX_CONTINUE;
1364 } /* ieee80211_rx_h_sta_process */
1365
1366 static inline struct ieee80211_fragment_entry *
1367 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1368 unsigned int frag, unsigned int seq, int rx_queue,
1369 struct sk_buff **skb)
1370 {
1371 struct ieee80211_fragment_entry *entry;
1372 int idx;
1373
1374 idx = sdata->fragment_next;
1375 entry = &sdata->fragments[sdata->fragment_next++];
1376 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1377 sdata->fragment_next = 0;
1378
1379 if (!skb_queue_empty(&entry->skb_list)) {
1380 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1381 struct ieee80211_hdr *hdr =
1382 (struct ieee80211_hdr *) entry->skb_list.next->data;
1383 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1384 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1385 "addr1=%pM addr2=%pM\n",
1386 sdata->name, idx,
1387 jiffies - entry->first_frag_time, entry->seq,
1388 entry->last_frag, hdr->addr1, hdr->addr2);
1389 #endif
1390 __skb_queue_purge(&entry->skb_list);
1391 }
1392
1393 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1394 *skb = NULL;
1395 entry->first_frag_time = jiffies;
1396 entry->seq = seq;
1397 entry->rx_queue = rx_queue;
1398 entry->last_frag = frag;
1399 entry->ccmp = 0;
1400 entry->extra_len = 0;
1401
1402 return entry;
1403 }
1404
1405 static inline struct ieee80211_fragment_entry *
1406 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1407 unsigned int frag, unsigned int seq,
1408 int rx_queue, struct ieee80211_hdr *hdr)
1409 {
1410 struct ieee80211_fragment_entry *entry;
1411 int i, idx;
1412
1413 idx = sdata->fragment_next;
1414 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1415 struct ieee80211_hdr *f_hdr;
1416
1417 idx--;
1418 if (idx < 0)
1419 idx = IEEE80211_FRAGMENT_MAX - 1;
1420
1421 entry = &sdata->fragments[idx];
1422 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1423 entry->rx_queue != rx_queue ||
1424 entry->last_frag + 1 != frag)
1425 continue;
1426
1427 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1428
1429 /*
1430 * Check ftype and addresses are equal, else check next fragment
1431 */
1432 if (((hdr->frame_control ^ f_hdr->frame_control) &
1433 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1434 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1435 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1436 continue;
1437
1438 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1439 __skb_queue_purge(&entry->skb_list);
1440 continue;
1441 }
1442 return entry;
1443 }
1444
1445 return NULL;
1446 }
1447
1448 static ieee80211_rx_result debug_noinline
1449 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1450 {
1451 struct ieee80211_hdr *hdr;
1452 u16 sc;
1453 __le16 fc;
1454 unsigned int frag, seq;
1455 struct ieee80211_fragment_entry *entry;
1456 struct sk_buff *skb;
1457 struct ieee80211_rx_status *status;
1458
1459 hdr = (struct ieee80211_hdr *)rx->skb->data;
1460 fc = hdr->frame_control;
1461 sc = le16_to_cpu(hdr->seq_ctrl);
1462 frag = sc & IEEE80211_SCTL_FRAG;
1463
1464 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1465 (rx->skb)->len < 24 ||
1466 is_multicast_ether_addr(hdr->addr1))) {
1467 /* not fragmented */
1468 goto out;
1469 }
1470 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1471
1472 if (skb_linearize(rx->skb))
1473 return RX_DROP_UNUSABLE;
1474
1475 /*
1476 * skb_linearize() might change the skb->data and
1477 * previously cached variables (in this case, hdr) need to
1478 * be refreshed with the new data.
1479 */
1480 hdr = (struct ieee80211_hdr *)rx->skb->data;
1481 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1482
1483 if (frag == 0) {
1484 /* This is the first fragment of a new frame. */
1485 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1486 rx->seqno_idx, &(rx->skb));
1487 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1488 ieee80211_has_protected(fc)) {
1489 int queue = rx->security_idx;
1490 /* Store CCMP PN so that we can verify that the next
1491 * fragment has a sequential PN value. */
1492 entry->ccmp = 1;
1493 memcpy(entry->last_pn,
1494 rx->key->u.ccmp.rx_pn[queue],
1495 CCMP_PN_LEN);
1496 }
1497 return RX_QUEUED;
1498 }
1499
1500 /* This is a fragment for a frame that should already be pending in
 1501  * the fragment cache. Add this fragment to the end of the pending entry.
1502 */
1503 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1504 rx->seqno_idx, hdr);
1505 if (!entry) {
1506 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1507 return RX_DROP_MONITOR;
1508 }
1509
1510 /* Verify that MPDUs within one MSDU have sequential PN values.
1511 * (IEEE 802.11i, 8.3.3.4.5) */
1512 if (entry->ccmp) {
1513 int i;
1514 u8 pn[CCMP_PN_LEN], *rpn;
1515 int queue;
1516 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1517 return RX_DROP_UNUSABLE;
1518 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1519 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1520 pn[i]++;
1521 if (pn[i])
1522 break;
1523 }
1524 queue = rx->security_idx;
1525 rpn = rx->key->u.ccmp.rx_pn[queue];
1526 if (memcmp(pn, rpn, CCMP_PN_LEN))
1527 return RX_DROP_UNUSABLE;
1528 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1529 }
1530
1531 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1532 __skb_queue_tail(&entry->skb_list, rx->skb);
1533 entry->last_frag = frag;
1534 entry->extra_len += rx->skb->len;
1535 if (ieee80211_has_morefrags(fc)) {
1536 rx->skb = NULL;
1537 return RX_QUEUED;
1538 }
1539
1540 rx->skb = __skb_dequeue(&entry->skb_list);
1541 if (skb_tailroom(rx->skb) < entry->extra_len) {
1542 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1543 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1544 GFP_ATOMIC))) {
1545 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1546 __skb_queue_purge(&entry->skb_list);
1547 return RX_DROP_UNUSABLE;
1548 }
1549 }
1550 while ((skb = __skb_dequeue(&entry->skb_list))) {
1551 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1552 dev_kfree_skb(skb);
1553 }
1554
1555 /* Complete frame has been reassembled - process it now */
1556 status = IEEE80211_SKB_RXCB(rx->skb);
1557 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1558
1559 out:
1560 if (rx->sta)
1561 rx->sta->rx_packets++;
1562 if (is_multicast_ether_addr(hdr->addr1))
1563 rx->local->dot11MulticastReceivedFrameCount++;
1564 else
1565 ieee80211_led_rx(rx->local);
1566 return RX_CONTINUE;
1567 }
1568
1569 static ieee80211_rx_result debug_noinline
1570 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1571 {
1572 u8 *data = rx->skb->data;
1573 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1574
1575 if (!ieee80211_is_data_qos(hdr->frame_control))
1576 return RX_CONTINUE;
1577
1578 /* remove the qos control field, update frame type and meta-data */
1579 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1580 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1581 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1582 /* change frame type to non QOS */
1583 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1584
1585 return RX_CONTINUE;
1586 }
1587
1588 static int
1589 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1590 {
1591 if (unlikely(!rx->sta ||
1592 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1593 return -EACCES;
1594
1595 return 0;
1596 }
1597
1598 static int
1599 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1600 {
1601 struct sk_buff *skb = rx->skb;
1602 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1603
1604 /*
1605 * Pass through unencrypted frames if the hardware has
1606 * decrypted them already.
1607 */
1608 if (status->flag & RX_FLAG_DECRYPTED)
1609 return 0;
1610
1611 /* Drop unencrypted frames if key is set. */
1612 if (unlikely(!ieee80211_has_protected(fc) &&
1613 !ieee80211_is_nullfunc(fc) &&
1614 ieee80211_is_data(fc) &&
1615 (rx->key || rx->sdata->drop_unencrypted)))
1616 return -EACCES;
1617
1618 return 0;
1619 }
1620
1621 static int
1622 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1623 {
1624 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1625 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1626 __le16 fc = hdr->frame_control;
1627
1628 /*
1629 * Pass through unencrypted frames if the hardware has
1630 * decrypted them already.
1631 */
1632 if (status->flag & RX_FLAG_DECRYPTED)
1633 return 0;
1634
1635 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1636 if (unlikely(!ieee80211_has_protected(fc) &&
1637 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1638 rx->key)) {
1639 if (ieee80211_is_deauth(fc))
1640 cfg80211_send_unprot_deauth(rx->sdata->dev,
1641 rx->skb->data,
1642 rx->skb->len);
1643 else if (ieee80211_is_disassoc(fc))
1644 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1645 rx->skb->data,
1646 rx->skb->len);
1647 return -EACCES;
1648 }
1649 /* BIP does not use Protected field, so need to check MMIE */
1650 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1651 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1652 if (ieee80211_is_deauth(fc))
1653 cfg80211_send_unprot_deauth(rx->sdata->dev,
1654 rx->skb->data,
1655 rx->skb->len);
1656 else if (ieee80211_is_disassoc(fc))
1657 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1658 rx->skb->data,
1659 rx->skb->len);
1660 return -EACCES;
1661 }
1662 /*
1663 * When using MFP, Action frames are not allowed prior to
1664 * having configured keys.
1665 */
1666 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1667 ieee80211_is_robust_mgmt_frame(
1668 (struct ieee80211_hdr *) rx->skb->data)))
1669 return -EACCES;
1670 }
1671
1672 return 0;
1673 }
1674
1675 static int
1676 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1677 {
1678 struct ieee80211_sub_if_data *sdata = rx->sdata;
1679 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1680 bool check_port_control = false;
1681 struct ethhdr *ehdr;
1682 int ret;
1683
1684 *port_control = false;
1685 if (ieee80211_has_a4(hdr->frame_control) &&
1686 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1687 return -1;
1688
1689 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1690 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1691
1692 if (!sdata->u.mgd.use_4addr)
1693 return -1;
1694 else
1695 check_port_control = true;
1696 }
1697
1698 if (is_multicast_ether_addr(hdr->addr1) &&
1699 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1700 return -1;
1701
1702 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1703 if (ret < 0)
1704 return ret;
1705
1706 ehdr = (struct ethhdr *) rx->skb->data;
1707 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1708 *port_control = true;
1709 else if (check_port_control)
1710 return -1;
1711
1712 return 0;
1713 }
1714
1715 /*
1716 * requires that rx->skb is a frame with ethernet header
1717 */
1718 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1719 {
1720 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1721 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1722 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1723
1724 /*
1725 * Allow EAPOL frames to us/the PAE group address regardless
1726 * of whether the frame was encrypted or not.
1727 */
1728 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1729 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1730 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1731 return true;
1732
1733 if (ieee80211_802_1x_port_control(rx) ||
1734 ieee80211_drop_unencrypted(rx, fc))
1735 return false;
1736
1737 return true;
1738 }
1739
1740 /*
1741 * requires that rx->skb is a frame with ethernet header
1742 */
1743 static void
1744 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1745 {
1746 struct ieee80211_sub_if_data *sdata = rx->sdata;
1747 struct net_device *dev = sdata->dev;
1748 struct sk_buff *skb, *xmit_skb;
1749 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1750 struct sta_info *dsta;
1751 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1752
1753 skb = rx->skb;
1754 xmit_skb = NULL;
1755
1756 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1757 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1758 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1759 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1760 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1761 if (is_multicast_ether_addr(ehdr->h_dest)) {
1762 /*
1763 * send multicast frames both to higher layers in
1764 * local net stack and back to the wireless medium
1765 */
1766 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1767 if (!xmit_skb && net_ratelimit())
1768 printk(KERN_DEBUG "%s: failed to clone "
1769 "multicast frame\n", dev->name);
1770 } else {
1771 dsta = sta_info_get(sdata, skb->data);
1772 if (dsta) {
1773 /*
1774 * The destination station is associated to
1775 * this AP (in this VLAN), so send the frame
1776 * directly to it and do not pass it to local
1777 * net stack.
1778 */
1779 xmit_skb = skb;
1780 skb = NULL;
1781 }
1782 }
1783 }
1784
1785 if (skb) {
1786 int align __maybe_unused;
1787
1788 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1789 /*
1790 * 'align' will only take the values 0 or 2 here
1791 * since all frames are required to be aligned
1792 * to 2-byte boundaries when being passed to
 1793  * mac80211. That is also why skb->data can safely be
 1794  * moved back by up to two bytes below.
1795 */
1796 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1797 if (align) {
1798 if (WARN_ON(skb_headroom(skb) < 3)) {
1799 dev_kfree_skb(skb);
1800 skb = NULL;
1801 } else {
1802 u8 *data = skb->data;
1803 size_t len = skb_headlen(skb);
1804 skb->data -= align;
1805 memmove(skb->data, data, len);
1806 skb_set_tail_pointer(skb, len);
1807 }
1808 }
1809 #endif
1810
1811 if (skb) {
1812 /* deliver to local stack */
1813 skb->protocol = eth_type_trans(skb, dev);
1814 memset(skb->cb, 0, sizeof(skb->cb));
1815 netif_receive_skb(skb);
1816 }
1817 }
1818
1819 if (xmit_skb) {
1820 /* send to wireless media */
1821 xmit_skb->protocol = htons(ETH_P_802_3);
1822 skb_reset_network_header(xmit_skb);
1823 skb_reset_mac_header(xmit_skb);
1824 dev_queue_xmit(xmit_skb);
1825 }
1826 }
1827
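/*
 * A-MSDU handler: split an aggregated MSDU into its individual
 * 802.3 subframes and deliver each one separately, applying the
 * usual port control and encryption checks per subframe.
 */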
1828 static ieee80211_rx_result debug_noinline
1829 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1830 {
1831 struct net_device *dev = rx->sdata->dev;
1832 struct sk_buff *skb = rx->skb;
1833 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1834 __le16 fc = hdr->frame_control;
1835 struct sk_buff_head frame_list;
1836 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1837
1838 if (unlikely(!ieee80211_is_data(fc)))
1839 return RX_CONTINUE;
1840
1841 if (unlikely(!ieee80211_is_data_present(fc)))
1842 return RX_DROP_MONITOR;
1843
1844 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1845 return RX_CONTINUE;
1846
1847 if (ieee80211_has_a4(hdr->frame_control) &&
1848 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1849 !rx->sdata->u.vlan.sta)
1850 return RX_DROP_UNUSABLE;
1851
1852 if (is_multicast_ether_addr(hdr->addr1) &&
1853 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1854 rx->sdata->u.vlan.sta) ||
1855 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1856 rx->sdata->u.mgd.use_4addr)))
1857 return RX_DROP_UNUSABLE;
1858
1859 skb->dev = dev;
1860 __skb_queue_head_init(&frame_list);
1861
1862 if (skb_linearize(skb))
1863 return RX_DROP_UNUSABLE;
1864
1865 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1866 rx->sdata->vif.type,
1867 rx->local->hw.extra_tx_headroom, true);
1868
1869 while (!skb_queue_empty(&frame_list)) {
1870 rx->skb = __skb_dequeue(&frame_list);
1871
1872 if (!ieee80211_frame_allowed(rx, fc)) {
1873 dev_kfree_skb(rx->skb);
1874 continue;
1875 }
1876 dev->stats.rx_packets++;
1877 dev->stats.rx_bytes += rx->skb->len;
1878
1879 ieee80211_deliver_skb(rx);
1880 }
1881
1882 return RX_QUEUED;
1883 }
1884
1885 #ifdef CONFIG_MAC80211_MESH
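/*
 * Mesh forwarding handler: drop frames already seen (RMC), learn
 * proxied addresses from the mesh address extension field, and
 * forward frames not destined for us by decrementing the TTL and
 * queueing a copy for transmission.
 */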
1886 static ieee80211_rx_result
1887 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1888 {
1889 struct ieee80211_hdr *hdr;
1890 struct ieee80211s_hdr *mesh_hdr;
1891 unsigned int hdrlen;
1892 struct sk_buff *skb = rx->skb, *fwd_skb;
1893 struct ieee80211_local *local = rx->local;
1894 struct ieee80211_sub_if_data *sdata = rx->sdata;
1895 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1896
1897 hdr = (struct ieee80211_hdr *) skb->data;
1898 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1899 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1900
1901 /* frame is in RMC, don't forward */
1902 if (ieee80211_is_data(hdr->frame_control) &&
1903 is_multicast_ether_addr(hdr->addr1) &&
1904 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1905 return RX_DROP_MONITOR;
1906
1907 if (!ieee80211_is_data(hdr->frame_control))
1908 return RX_CONTINUE;
1909
1910 if (!mesh_hdr->ttl)
1911 /* illegal frame */
1912 return RX_DROP_MONITOR;
1913
1914 if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
1915 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1916 dropped_frames_congestion);
1917 return RX_DROP_MONITOR;
1918 }
1919
1920 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1921 struct mesh_path *mppath;
1922 char *proxied_addr;
1923 char *mpp_addr;
1924
1925 if (is_multicast_ether_addr(hdr->addr1)) {
1926 mpp_addr = hdr->addr3;
1927 proxied_addr = mesh_hdr->eaddr1;
1928 } else {
1929 mpp_addr = hdr->addr4;
1930 proxied_addr = mesh_hdr->eaddr2;
1931 }
1932
1933 rcu_read_lock();
1934 mppath = mpp_path_lookup(proxied_addr, sdata);
1935 if (!mppath) {
1936 mpp_path_add(proxied_addr, mpp_addr, sdata);
1937 } else {
1938 spin_lock_bh(&mppath->state_lock);
1939 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1940 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1941 spin_unlock_bh(&mppath->state_lock);
1942 }
1943 rcu_read_unlock();
1944 }
1945
1946 /* Frame has reached destination. Don't forward */
1947 if (!is_multicast_ether_addr(hdr->addr1) &&
1948 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1949 return RX_CONTINUE;
1950
1951 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
1952 mesh_hdr->ttl--;
1953
1954 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1955 if (!mesh_hdr->ttl)
1956 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1957 dropped_frames_ttl);
1958 else {
1959 struct ieee80211_hdr *fwd_hdr;
1960 struct ieee80211_tx_info *info;
1961
1962 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1963
1964 if (!fwd_skb && net_ratelimit())
1965 printk(KERN_DEBUG "%s: failed to copy mesh frame\n",
1966 sdata->name);
1967 if (!fwd_skb)
1968 goto out;
1969
1970 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1971 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1972 info = IEEE80211_SKB_CB(fwd_skb);
1973 memset(info, 0, sizeof(*info));
1974 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1975 info->control.vif = &rx->sdata->vif;
1976 info->control.jiffies = jiffies;
1977 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1978 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1979 fwded_mcast);
1980 } else {
1981 int err;
1982 /*
1983 * Save TA to addr1 to send TA a path error if a
1984 * suitable next hop is not found
1985 */
1986 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1987 ETH_ALEN);
1988 err = mesh_nexthop_lookup(fwd_skb, sdata);
1989 /* Failed to immediately resolve next hop:
1990 * fwded frame was dropped or will be added
1991 * later to the pending skb queue. */
1992 if (err)
1993 return RX_DROP_MONITOR;
1994
1995 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1996 fwded_unicast);
1997 }
1998 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1999 fwded_frames);
2000 ieee80211_add_pending_skb(local, fwd_skb);
2001 }
2002 }
2003
2004 out:
2005 if (is_multicast_ether_addr(hdr->addr1) ||
2006 sdata->dev->flags & IFF_PROMISC)
2007 return RX_CONTINUE;
2008 else
2009 return RX_DROP_MONITOR;
2010 }
2011 #endif
2012
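/*
 * Data frame handler: convert the 802.11 frame to 802.3, enforce the
 * controlled port and drop-unencrypted rules, postpone dynamic power
 * save for unicast traffic and hand the frame to the local stack.
 */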
2013 static ieee80211_rx_result debug_noinline
2014 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2015 {
2016 struct ieee80211_sub_if_data *sdata = rx->sdata;
2017 struct ieee80211_local *local = rx->local;
2018 struct net_device *dev = sdata->dev;
2019 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2020 __le16 fc = hdr->frame_control;
2021 bool port_control;
2022 int err;
2023
2024 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2025 return RX_CONTINUE;
2026
2027 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2028 return RX_DROP_MONITOR;
2029
2030 /*
2031 * Allow the cooked monitor interface of an AP to see 4-addr frames so
2032 * that a 4-addr station can be detected and moved into a separate VLAN
2033 */
2034 if (ieee80211_has_a4(hdr->frame_control) &&
2035 sdata->vif.type == NL80211_IFTYPE_AP)
2036 return RX_DROP_MONITOR;
2037
2038 err = __ieee80211_data_to_8023(rx, &port_control);
2039 if (unlikely(err))
2040 return RX_DROP_UNUSABLE;
2041
2042 if (!ieee80211_frame_allowed(rx, fc))
2043 return RX_DROP_MONITOR;
2044
2045 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2046 unlikely(port_control) && sdata->bss) {
2047 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2048 u.ap);
2049 dev = sdata->dev;
2050 rx->sdata = sdata;
2051 }
2052
2053 rx->skb->dev = dev;
2054
2055 dev->stats.rx_packets++;
2056 dev->stats.rx_bytes += rx->skb->len;
2057
2058 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2059 !is_multicast_ether_addr(
2060 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2061 (!local->scanning &&
2062 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2063 mod_timer(&local->dynamic_ps_timer, jiffies +
2064 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2065 }
2066
2067 ieee80211_deliver_skb(rx);
2068
2069 return RX_QUEUED;
2070 }
2071
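/*
 * Control frame handler: only BlockAck requests are processed here;
 * they release buffered reorder frames up to the BAR start sequence
 * number and reset the session timer. All other control frames fall
 * through to cooked monitor interfaces.
 */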
2072 static ieee80211_rx_result debug_noinline
2073 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2074 {
2075 struct ieee80211_local *local = rx->local;
2076 struct ieee80211_hw *hw = &local->hw;
2077 struct sk_buff *skb = rx->skb;
2078 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2079 struct tid_ampdu_rx *tid_agg_rx;
2080 u16 start_seq_num;
2081 u16 tid;
2082
2083 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2084 return RX_CONTINUE;
2085
2086 if (ieee80211_is_back_req(bar->frame_control)) {
2087 struct {
2088 __le16 control, start_seq_num;
2089 } __packed bar_data;
2090
2091 if (!rx->sta)
2092 return RX_DROP_MONITOR;
2093
2094 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2095 &bar_data, sizeof(bar_data)))
2096 return RX_DROP_MONITOR;
2097
2098 tid = le16_to_cpu(bar_data.control) >> 12;
2099
2100 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2101 if (!tid_agg_rx)
2102 return RX_DROP_MONITOR;
2103
2104 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2105
2106 /* reset session timer */
2107 if (tid_agg_rx->timeout)
2108 mod_timer(&tid_agg_rx->session_timer,
2109 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2110
2111 spin_lock(&tid_agg_rx->reorder_lock);
2112 /* release stored frames up to start of BAR */
2113 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2114 spin_unlock(&tid_agg_rx->reorder_lock);
2115
2116 kfree_skb(skb);
2117 return RX_QUEUED;
2118 }
2119
2120 /*
2121 * After this point, we only want management frames,
2122 * so we can drop all remaining control frames to
2123 * cooked monitor interfaces.
2124 */
2125 return RX_DROP_MONITOR;
2126 }
2127
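/*
 * Respond to an SA Query request from our AP (802.11w): build an SA
 * Query response carrying the same transaction ID and transmit it.
 */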
2128 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2129 struct ieee80211_mgmt *mgmt,
2130 size_t len)
2131 {
2132 struct ieee80211_local *local = sdata->local;
2133 struct sk_buff *skb;
2134 struct ieee80211_mgmt *resp;
2135
2136 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2137 /* Not to own unicast address */
2138 return;
2139 }
2140
2141 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2142 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2143 /* Not from the current AP or not associated yet. */
2144 return;
2145 }
2146
2147 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2148 /* Too short SA Query request frame */
2149 return;
2150 }
2151
2152 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2153 if (skb == NULL)
2154 return;
2155
2156 skb_reserve(skb, local->hw.extra_tx_headroom);
2157 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2158 memset(resp, 0, 24);
2159 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2160 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2161 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2162 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2163 IEEE80211_STYPE_ACTION);
2164 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2165 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2166 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2167 memcpy(resp->u.action.u.sa_query.trans_id,
2168 mgmt->u.action.u.sa_query.trans_id,
2169 WLAN_SA_QUERY_TR_ID_LEN);
2170
2171 ieee80211_tx_skb(sdata, skb);
2172 }
2173
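/*
 * Basic management frame checks: verify length and frame type, report
 * received beacons to userspace on AP interfaces (OBSS beacon
 * reporting), and drop frames that were not addressed to us or that
 * fail the unencrypted management frame policy.
 */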
2174 static ieee80211_rx_result debug_noinline
2175 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2176 {
2177 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2178 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2179
2180 /*
2181 * From here on, look only at management frames.
2182 * Data and control frames are already handled,
2183 * and unknown (reserved) frames are useless.
2184 */
2185 if (rx->skb->len < 24)
2186 return RX_DROP_MONITOR;
2187
2188 if (!ieee80211_is_mgmt(mgmt->frame_control))
2189 return RX_DROP_MONITOR;
2190
2191 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2192 ieee80211_is_beacon(mgmt->frame_control) &&
2193 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2197 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2198 rx->skb->data, rx->skb->len,
2199 status->freq, GFP_ATOMIC);
2200 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2201 }
2202
2203 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2204 return RX_DROP_MONITOR;
2205
2206 if (ieee80211_drop_unencrypted_mgmt(rx))
2207 return RX_DROP_UNUSABLE;
2208
2209 return RX_CONTINUE;
2210 }
2211
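/*
 * Action frame handler: validate the frame per category (block-ack,
 * spectrum management, SA query, self-protected, mesh) and either
 * process it directly, queue it to the interface work for process
 * context, or mark it malformed for the following handlers.
 */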
2212 static ieee80211_rx_result debug_noinline
2213 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2214 {
2215 struct ieee80211_local *local = rx->local;
2216 struct ieee80211_sub_if_data *sdata = rx->sdata;
2217 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2218 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2219 int len = rx->skb->len;
2220
2221 if (!ieee80211_is_action(mgmt->frame_control))
2222 return RX_CONTINUE;
2223
2224 /* drop too small frames */
2225 if (len < IEEE80211_MIN_ACTION_SIZE)
2226 return RX_DROP_UNUSABLE;
2227
2228 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2229 return RX_DROP_UNUSABLE;
2230
2231 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2232 return RX_DROP_UNUSABLE;
2233
2234 switch (mgmt->u.action.category) {
2235 case WLAN_CATEGORY_BACK:
2236 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2237 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2238 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2239 sdata->vif.type != NL80211_IFTYPE_AP)
2240 break;
2241
2242 /* verify action_code is present */
2243 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2244 break;
2245
2246 switch (mgmt->u.action.u.addba_req.action_code) {
2247 case WLAN_ACTION_ADDBA_REQ:
2248 if (len < (IEEE80211_MIN_ACTION_SIZE +
2249 sizeof(mgmt->u.action.u.addba_req)))
2250 goto invalid;
2251 break;
2252 case WLAN_ACTION_ADDBA_RESP:
2253 if (len < (IEEE80211_MIN_ACTION_SIZE +
2254 sizeof(mgmt->u.action.u.addba_resp)))
2255 goto invalid;
2256 break;
2257 case WLAN_ACTION_DELBA:
2258 if (len < (IEEE80211_MIN_ACTION_SIZE +
2259 sizeof(mgmt->u.action.u.delba)))
2260 goto invalid;
2261 break;
2262 default:
2263 goto invalid;
2264 }
2265
2266 goto queue;
2267 case WLAN_CATEGORY_SPECTRUM_MGMT:
2268 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2269 break;
2270
2271 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2272 break;
2273
2274 /* verify action_code is present */
2275 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2276 break;
2277
2278 switch (mgmt->u.action.u.measurement.action_code) {
2279 case WLAN_ACTION_SPCT_MSR_REQ:
2280 if (len < (IEEE80211_MIN_ACTION_SIZE +
2281 sizeof(mgmt->u.action.u.measurement)))
2282 break;
2283 ieee80211_process_measurement_req(sdata, mgmt, len);
2284 goto handled;
2285 case WLAN_ACTION_SPCT_CHL_SWITCH:
2286 if (len < (IEEE80211_MIN_ACTION_SIZE +
2287 sizeof(mgmt->u.action.u.chan_switch)))
2288 break;
2289
2290 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2291 break;
2292
2293 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2294 break;
2295
2296 goto queue;
2297 }
2298 break;
2299 case WLAN_CATEGORY_SA_QUERY:
2300 if (len < (IEEE80211_MIN_ACTION_SIZE +
2301 sizeof(mgmt->u.action.u.sa_query)))
2302 break;
2303
2304 switch (mgmt->u.action.u.sa_query.action) {
2305 case WLAN_ACTION_SA_QUERY_REQUEST:
2306 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2307 break;
2308 ieee80211_process_sa_query_req(sdata, mgmt, len);
2309 goto handled;
2310 }
2311 break;
2312 case WLAN_CATEGORY_SELF_PROTECTED:
2313 switch (mgmt->u.action.u.self_prot.action_code) {
2314 case WLAN_SP_MESH_PEERING_OPEN:
2315 case WLAN_SP_MESH_PEERING_CLOSE:
2316 case WLAN_SP_MESH_PEERING_CONFIRM:
2317 if (!ieee80211_vif_is_mesh(&sdata->vif))
2318 goto invalid;
2319 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2320 /* userspace handles this frame */
2321 break;
2322 goto queue;
2323 case WLAN_SP_MGK_INFORM:
2324 case WLAN_SP_MGK_ACK:
2325 if (!ieee80211_vif_is_mesh(&sdata->vif))
2326 goto invalid;
2327 break;
2328 }
2329 break;
2330 case WLAN_CATEGORY_MESH_ACTION:
2331 if (!ieee80211_vif_is_mesh(&sdata->vif))
2332 break;
2333 if (mesh_action_is_path_sel(mgmt) &&
2334 (!mesh_path_sel_is_hwmp(sdata)))
2335 break;
2336 goto queue;
2337 }
2338
2339 return RX_CONTINUE;
2340
2341 invalid:
2342 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2343 /* will return in the next handlers */
2344 return RX_CONTINUE;
2345
2346 handled:
2347 if (rx->sta)
2348 rx->sta->rx_packets++;
2349 dev_kfree_skb(rx->skb);
2350 return RX_QUEUED;
2351
2352 queue:
2353 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2354 skb_queue_tail(&sdata->skb_queue, rx->skb);
2355 ieee80211_queue_work(&local->hw, &sdata->work);
2356 if (rx->sta)
2357 rx->sta->rx_packets++;
2358 return RX_QUEUED;
2359 }
2360
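/*
 * Offer management frames the kernel did not handle to userspace via
 * the cfg80211 management frame registration mechanism.
 */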
2361 static ieee80211_rx_result debug_noinline
2362 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2363 {
2364 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2365
2366 /* skip known-bad action frames and return them in the next handler */
2367 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2368 return RX_CONTINUE;
2369
2370 /*
2371 * Getting here means the kernel doesn't know how to handle
2372 * the frame, but maybe userspace does. Include returned frames
2373 * so userspace can register for those and learn whether frames
2374 * it transmitted were processed or returned.
2375 */
2376
2377 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2378 rx->skb->data, rx->skb->len,
2379 GFP_ATOMIC)) {
2380 if (rx->sta)
2381 rx->sta->rx_packets++;
2382 dev_kfree_skb(rx->skb);
2383 return RX_QUEUED;
2384 }
2385
2387 return RX_CONTINUE;
2388 }
2389
2390 static ieee80211_rx_result debug_noinline
2391 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2392 {
2393 struct ieee80211_local *local = rx->local;
2394 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2395 struct sk_buff *nskb;
2396 struct ieee80211_sub_if_data *sdata = rx->sdata;
2397 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2398
2399 if (!ieee80211_is_action(mgmt->frame_control))
2400 return RX_CONTINUE;
2401
2402 /*
2403 * For AP mode, hostapd is responsible for handling any action
2404 * frames that we didn't handle, including returning unknown
2405 * ones. For all other modes we will return them to the sender,
2406 * setting the 0x80 bit in the action category, as required by
2407 * 802.11-2007 7.3.1.11.
2408 * Newer versions of hostapd shall also use the management frame
2409 * registration mechanisms, but older ones still use cooked
2410 * monitor interfaces so push all frames there.
2411 */
2412 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2413 (sdata->vif.type == NL80211_IFTYPE_AP ||
2414 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2415 return RX_DROP_MONITOR;
2416
2417 /* do not return rejected action frames */
2418 if (mgmt->u.action.category & 0x80)
2419 return RX_DROP_UNUSABLE;
2420
2421 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2422 GFP_ATOMIC);
2423 if (nskb) {
2424 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2425
2426 nmgmt->u.action.category |= 0x80;
2427 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2428 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2429
2430 memset(nskb->cb, 0, sizeof(nskb->cb));
2431
2432 ieee80211_tx_skb(rx->sdata, nskb);
2433 }
2434 dev_kfree_skb(rx->skb);
2435 return RX_QUEUED;
2436 }
2437
2438 static ieee80211_rx_result debug_noinline
2439 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2440 {
2441 struct ieee80211_sub_if_data *sdata = rx->sdata;
2442 ieee80211_rx_result rxs;
2443 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2444 __le16 stype;
2445
2446 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2447 if (rxs != RX_CONTINUE)
2448 return rxs;
2449
2450 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2451
2452 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2453 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2454 sdata->vif.type != NL80211_IFTYPE_STATION)
2455 return RX_DROP_MONITOR;
2456
2457 switch (stype) {
2458 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2459 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2460 /* process for all: mesh, mlme, ibss */
2461 break;
2462 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2463 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2464 if (is_multicast_ether_addr(mgmt->da) &&
2465 !is_broadcast_ether_addr(mgmt->da))
2466 return RX_DROP_MONITOR;
2467
2468 /* process only for station */
2469 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2470 return RX_DROP_MONITOR;
2471 break;
2472 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2473 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2474 /* process only for ibss */
2475 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2476 return RX_DROP_MONITOR;
2477 break;
2478 default:
2479 return RX_DROP_MONITOR;
2480 }
2481
2482 /* queue up frame and kick off work to process it */
2483 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2484 skb_queue_tail(&sdata->skb_queue, rx->skb);
2485 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2486 if (rx->sta)
2487 rx->sta->rx_packets++;
2488
2489 return RX_QUEUED;
2490 }
2491
2492 /* TODO: use IEEE80211_RX_FRAGMENTED */
2493 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2494 struct ieee80211_rate *rate)
2495 {
2496 struct ieee80211_sub_if_data *sdata;
2497 struct ieee80211_local *local = rx->local;
2498 struct ieee80211_rtap_hdr {
2499 struct ieee80211_radiotap_header hdr;
2500 u8 flags;
2501 u8 rate_or_pad;
2502 __le16 chan_freq;
2503 __le16 chan_flags;
2504 } __packed *rthdr;
2505 struct sk_buff *skb = rx->skb, *skb2;
2506 struct net_device *prev_dev = NULL;
2507 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2508
2509 /*
2510 * If cooked monitor has been processed already, then
2511 * don't do it again. If not, set the flag.
2512 */
2513 if (rx->flags & IEEE80211_RX_CMNTR)
2514 goto out_free_skb;
2515 rx->flags |= IEEE80211_RX_CMNTR;
2516
2517 /* If there are no cooked monitor interfaces, just free the SKB */
2518 if (!local->cooked_mntrs)
2519 goto out_free_skb;
2520
2521 if (skb_headroom(skb) < sizeof(*rthdr) &&
2522 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2523 goto out_free_skb;
2524
2525 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2526 memset(rthdr, 0, sizeof(*rthdr));
2527 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2528 rthdr->hdr.it_present =
2529 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2530 (1 << IEEE80211_RADIOTAP_CHANNEL));
2531
2532 if (rate) {
2533 rthdr->rate_or_pad = rate->bitrate / 5;
2534 rthdr->hdr.it_present |=
2535 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2536 }
2537 rthdr->chan_freq = cpu_to_le16(status->freq);
2538
2539 if (status->band == IEEE80211_BAND_5GHZ)
2540 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2541 IEEE80211_CHAN_5GHZ);
2542 else
2543 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2544 IEEE80211_CHAN_2GHZ);
2545
2546 skb_set_mac_header(skb, 0);
2547 skb->ip_summed = CHECKSUM_UNNECESSARY;
2548 skb->pkt_type = PACKET_OTHERHOST;
2549 skb->protocol = htons(ETH_P_802_2);
2550
2551 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2552 if (!ieee80211_sdata_running(sdata))
2553 continue;
2554
2555 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2556 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2557 continue;
2558
2559 if (prev_dev) {
2560 skb2 = skb_clone(skb, GFP_ATOMIC);
2561 if (skb2) {
2562 skb2->dev = prev_dev;
2563 netif_receive_skb(skb2);
2564 }
2565 }
2566
2567 prev_dev = sdata->dev;
2568 sdata->dev->stats.rx_packets++;
2569 sdata->dev->stats.rx_bytes += skb->len;
2570 }
2571
2572 if (prev_dev) {
2573 skb->dev = prev_dev;
2574 netif_receive_skb(skb);
2575 return;
2576 }
2577
2578 out_free_skb:
2579 dev_kfree_skb(skb);
2580 }
2581
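/*
 * Act on the final result of the RX handler chain: pass dropped frames
 * to cooked monitor interfaces where appropriate, free unusable frames
 * and update the debug counters.
 */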
2582 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2583 ieee80211_rx_result res)
2584 {
2585 switch (res) {
2586 case RX_DROP_MONITOR:
2587 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2588 if (rx->sta)
2589 rx->sta->rx_dropped++;
2590 /* fall through */
2591 case RX_CONTINUE: {
2592 struct ieee80211_rate *rate = NULL;
2593 struct ieee80211_supported_band *sband;
2594 struct ieee80211_rx_status *status;
2595
2596 status = IEEE80211_SKB_RXCB((rx->skb));
2597
2598 sband = rx->local->hw.wiphy->bands[status->band];
2599 if (!(status->flag & RX_FLAG_HT))
2600 rate = &sband->bitrates[status->rate_idx];
2601
2602 ieee80211_rx_cooked_monitor(rx, rate);
2603 break;
2604 }
2605 case RX_DROP_UNUSABLE:
2606 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2607 if (rx->sta)
2608 rx->sta->rx_dropped++;
2609 dev_kfree_skb(rx->skb);
2610 break;
2611 case RX_QUEUED:
2612 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2613 break;
2614 }
2615 }
2616
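/*
 * Run the main RX handler chain over all frames queued on the local
 * rx_skb_queue. The running_rx_handler flag ensures that only one
 * context processes the queue at a time; frames queued concurrently
 * are picked up by the context already running the handlers.
 */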
2617 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2618 {
2619 ieee80211_rx_result res = RX_DROP_MONITOR;
2620 struct sk_buff *skb;
2621
2622 #define CALL_RXH(rxh) \
2623 do { \
2624 res = rxh(rx); \
2625 if (res != RX_CONTINUE) \
2626 goto rxh_next; \
2627 } while (0);
2628
2629 spin_lock(&rx->local->rx_skb_queue.lock);
2630 if (rx->local->running_rx_handler)
2631 goto unlock;
2632
2633 rx->local->running_rx_handler = true;
2634
2635 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2636 spin_unlock(&rx->local->rx_skb_queue.lock);
2637
2638 /*
2639 * all the other fields are valid across frames
2640 * that belong to an aMPDU since they are on the
2641 * same TID from the same station
2642 */
2643 rx->skb = skb;
2644
2645 CALL_RXH(ieee80211_rx_h_decrypt)
2646 CALL_RXH(ieee80211_rx_h_check_more_data)
2647 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2648 CALL_RXH(ieee80211_rx_h_sta_process)
2649 CALL_RXH(ieee80211_rx_h_defragment)
2650 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2651 /* must be after MMIC verify so header is counted in MPDU mic */
2652 #ifdef CONFIG_MAC80211_MESH
2653 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2654 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2655 #endif
2656 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2657 CALL_RXH(ieee80211_rx_h_amsdu)
2658 CALL_RXH(ieee80211_rx_h_data)
2659 CALL_RXH(ieee80211_rx_h_ctrl);
2660 CALL_RXH(ieee80211_rx_h_mgmt_check)
2661 CALL_RXH(ieee80211_rx_h_action)
2662 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2663 CALL_RXH(ieee80211_rx_h_action_return)
2664 CALL_RXH(ieee80211_rx_h_mgmt)
2665
2666 rxh_next:
2667 ieee80211_rx_handlers_result(rx, res);
2668 spin_lock(&rx->local->rx_skb_queue.lock);
2669 #undef CALL_RXH
2670 }
2671
2672 rx->local->running_rx_handler = false;
2673
2674 unlock:
2675 spin_unlock(&rx->local->rx_skb_queue.lock);
2676 }
2677
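/*
 * Run the early handlers (passive scan, sanity check) on a prepared
 * frame, feed it through A-MPDU reordering and then into the main
 * handler chain.
 */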
2678 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2679 {
2680 ieee80211_rx_result res = RX_DROP_MONITOR;
2681
2682 #define CALL_RXH(rxh) \
2683 do { \
2684 res = rxh(rx); \
2685 if (res != RX_CONTINUE) \
2686 goto rxh_next; \
2687 } while (0);
2688
2689 CALL_RXH(ieee80211_rx_h_passive_scan)
2690 CALL_RXH(ieee80211_rx_h_check)
2691
2692 ieee80211_rx_reorder_ampdu(rx);
2693
2694 ieee80211_rx_handlers(rx);
2695 return;
2696
2697 rxh_next:
2698 ieee80211_rx_handlers_result(rx, res);
2699
2700 #undef CALL_RXH
2701 }
2702
2703 /*
2704 * This function makes calls into the RX path, therefore
2705 * it has to be invoked under RCU read lock.
2706 */
2707 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2708 {
2709 struct ieee80211_rx_data rx = {
2710 .sta = sta,
2711 .sdata = sta->sdata,
2712 .local = sta->local,
2713 /* This is OK -- must be QoS data frame */
2714 .security_idx = tid,
2715 .seqno_idx = tid,
2716 .flags = 0,
2717 };
2718 struct tid_ampdu_rx *tid_agg_rx;
2719
2720 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2721 if (!tid_agg_rx)
2722 return;
2723
2724 spin_lock(&tid_agg_rx->reorder_lock);
2725 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2726 spin_unlock(&tid_agg_rx->reorder_lock);
2727
2728 ieee80211_rx_handlers(&rx);
2729 }
2730
2731 /* main receive path */
2732
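/*
 * Decide whether this frame is of interest to the given interface.
 * Returns 0 if the frame should be ignored completely; otherwise the
 * frame is accepted, with IEEE80211_RX_RA_MATCH cleared when it is
 * only taken for passive purposes such as scanning or promiscuous
 * operation.
 */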
2733 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2734 struct ieee80211_hdr *hdr)
2735 {
2736 struct ieee80211_sub_if_data *sdata = rx->sdata;
2737 struct sk_buff *skb = rx->skb;
2738 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2739 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2740 int multicast = is_multicast_ether_addr(hdr->addr1);
2741
2742 switch (sdata->vif.type) {
2743 case NL80211_IFTYPE_STATION:
2744 if (!bssid && !sdata->u.mgd.use_4addr)
2745 return 0;
2746 if (!multicast &&
2747 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2748 if (!(sdata->dev->flags & IFF_PROMISC) ||
2749 sdata->u.mgd.use_4addr)
2750 return 0;
2751 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2752 }
2753 break;
2754 case NL80211_IFTYPE_ADHOC:
2755 if (!bssid)
2756 return 0;
2757 if (ieee80211_is_beacon(hdr->frame_control)) {
2758 return 1;
2759 } else if (!ieee80211_bssid_match(bssid,
2760 sdata->u.ibss.bssid)) {
2761 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2762 return 0;
2763 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2764 } else if (!multicast &&
2765 compare_ether_addr(sdata->vif.addr,
2766 hdr->addr1) != 0) {
2767 if (!(sdata->dev->flags & IFF_PROMISC))
2768 return 0;
2769 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2770 } else if (!rx->sta) {
2771 int rate_idx;
2772 if (status->flag & RX_FLAG_HT)
2773 rate_idx = 0; /* TODO: HT rates */
2774 else
2775 rate_idx = status->rate_idx;
2776 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2777 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2778 }
2779 break;
2780 case NL80211_IFTYPE_MESH_POINT:
2781 if (!multicast &&
2782 compare_ether_addr(sdata->vif.addr,
2783 hdr->addr1) != 0) {
2784 if (!(sdata->dev->flags & IFF_PROMISC))
2785 return 0;
2786
2787 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2788 }
2789 break;
2790 case NL80211_IFTYPE_AP_VLAN:
2791 case NL80211_IFTYPE_AP:
2792 if (!bssid) {
2793 if (compare_ether_addr(sdata->vif.addr,
2794 hdr->addr1))
2795 return 0;
2796 } else if (!ieee80211_bssid_match(bssid,
2797 sdata->vif.addr)) {
2798 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2799 !ieee80211_is_beacon(hdr->frame_control) &&
2800 !(ieee80211_is_action(hdr->frame_control) &&
2801 sdata->vif.p2p))
2802 return 0;
2803 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2804 }
2805 break;
2806 case NL80211_IFTYPE_WDS:
2807 if (bssid || !ieee80211_is_data(hdr->frame_control))
2808 return 0;
2809 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2810 return 0;
2811 break;
2812 default:
2813 /* should never get here */
2814 WARN_ON(1);
2815 break;
2816 }
2817
2818 return 1;
2819 }
2820
2821 /*
2822 * This function returns whether or not the SKB
2823 * was destined for RX processing or not, which,
2824 * if consume is true, is equivalent to whether
2825 * or not the skb was consumed.
2826 */
2827 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2828 struct sk_buff *skb, bool consume)
2829 {
2830 struct ieee80211_local *local = rx->local;
2831 struct ieee80211_sub_if_data *sdata = rx->sdata;
2832 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2833 struct ieee80211_hdr *hdr = (void *)skb->data;
2834 int prepares;
2835
2836 rx->skb = skb;
2837 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2838 prepares = prepare_for_handlers(rx, hdr);
2839
2840 if (!prepares)
2841 return false;
2842
2843 if (!consume) {
2844 skb = skb_copy(skb, GFP_ATOMIC);
2845 if (!skb) {
2846 if (net_ratelimit())
2847 wiphy_debug(local->hw.wiphy,
2848 "failed to copy skb for %s\n",
2849 sdata->name);
2850 return true;
2851 }
2852
2853 rx->skb = skb;
2854 }
2855
2856 ieee80211_invoke_rx_handlers(rx);
2857 return true;
2858 }
2859
2860 /*
2861 * This is the actual Rx frames handler. As it belongs to the Rx path it must
2862 * be called with rcu_read_lock protection.
2863 */
2864 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2865 struct sk_buff *skb)
2866 {
2867 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2868 struct ieee80211_local *local = hw_to_local(hw);
2869 struct ieee80211_sub_if_data *sdata;
2870 struct ieee80211_hdr *hdr;
2871 __le16 fc;
2872 struct ieee80211_rx_data rx;
2873 struct ieee80211_sub_if_data *prev;
2874 struct sta_info *sta, *tmp, *prev_sta;
2875 int err = 0;
2876
2877 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2878 memset(&rx, 0, sizeof(rx));
2879 rx.skb = skb;
2880 rx.local = local;
2881
2882 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2883 local->dot11ReceivedFragmentCount++;
2884
2885 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2886 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2887 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2888
2889 if (ieee80211_is_mgmt(fc))
2890 err = skb_linearize(skb);
2891 else
2892 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2893
2894 if (err) {
2895 dev_kfree_skb(skb);
2896 return;
2897 }
2898
2899 hdr = (struct ieee80211_hdr *)skb->data;
2900 ieee80211_parse_qos(&rx);
2901 ieee80211_verify_alignment(&rx);
2902
2903 if (ieee80211_is_data(fc)) {
2904 prev_sta = NULL;
2905
2906 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2907 if (!prev_sta) {
2908 prev_sta = sta;
2909 continue;
2910 }
2911
2912 rx.sta = prev_sta;
2913 rx.sdata = prev_sta->sdata;
2914 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2915
2916 prev_sta = sta;
2917 }
2918
2919 if (prev_sta) {
2920 rx.sta = prev_sta;
2921 rx.sdata = prev_sta->sdata;
2922
2923 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2924 return;
2925 goto out;
2926 }
2927 }
2928
2929 prev = NULL;
2930
2931 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2932 if (!ieee80211_sdata_running(sdata))
2933 continue;
2934
2935 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2936 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2937 continue;
2938
2939 /*
2940 * frame is destined for this interface, but if it's
2941 * not also for the previous one we handle that after
2942 * the loop to avoid copying the SKB once too much
2943 */
2944
2945 if (!prev) {
2946 prev = sdata;
2947 continue;
2948 }
2949
2950 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2951 rx.sdata = prev;
2952 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2953
2954 prev = sdata;
2955 }
2956
2957 if (prev) {
2958 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2959 rx.sdata = prev;
2960
2961 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2962 return;
2963 }
2964
2965 out:
2966 dev_kfree_skb(skb);
2967 }
2968
2969 /*
2970 * This is the receive path handler. It is called by a low level driver when an
2971 * 802.11 MPDU is received from the hardware.
2972 */
2973 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2974 {
2975 struct ieee80211_local *local = hw_to_local(hw);
2976 struct ieee80211_rate *rate = NULL;
2977 struct ieee80211_supported_band *sband;
2978 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2979
2980 WARN_ON_ONCE(softirq_count() == 0);
2981
2982 if (WARN_ON(status->band < 0 ||
2983 status->band >= IEEE80211_NUM_BANDS))
2984 goto drop;
2985
2986 sband = local->hw.wiphy->bands[status->band];
2987 if (WARN_ON(!sband))
2988 goto drop;
2989
2990 /*
2991 * If we're suspending, it is possible although not too likely
2992 * that we'd be receiving frames after having already partially
2993 * quiesced the stack. We can't process such frames then since
2994 * that might, for example, cause stations to be added or other
2995 * driver callbacks be invoked.
2996 */
2997 if (unlikely(local->quiescing || local->suspended))
2998 goto drop;
2999
3000 /*
3001 * The same happens when we're not even started,
3002 * but that's worth a warning.
3003 */
3004 if (WARN_ON(!local->started))
3005 goto drop;
3006
3007 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
3008 /*
3009 * Validate the rate, unless a PLCP error means that
3010 * we probably can't have a valid rate here anyway.
3011 */
3012
3013 if (status->flag & RX_FLAG_HT) {
3014 /*
3015 * rate_idx is MCS index, which can be [0-76]
3016 * as documented on:
3017 *
3018 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3019 *
3020 * Anything else would be some sort of driver or
3021 * hardware error. The driver should catch hardware
3022 * errors.
3023 */
3024 if (WARN((status->rate_idx < 0 ||
3025 status->rate_idx > 76),
3026 "Rate marked as an HT rate but passed "
3027 "status->rate_idx is not "
3028 "an MCS index [0-76]: %d (0x%02x)\n",
3029 status->rate_idx,
3030 status->rate_idx))
3031 goto drop;
3032 } else {
3033 if (WARN_ON(status->rate_idx < 0 ||
3034 status->rate_idx >= sband->n_bitrates))
3035 goto drop;
3036 rate = &sband->bitrates[status->rate_idx];
3037 }
3038 }
3039
3040 status->rx_flags = 0;
3041
3042 /*
3043 * key references and virtual interfaces are protected using RCU
3044 * and this requires that we are in a read-side RCU section during
3045 * receive processing
3046 */
3047 rcu_read_lock();
3048
3049 /*
3050 * Frames with failed FCS/PLCP checksum are not returned,
3051 * all other frames are returned without radiotap header
3052 * if it was previously present.
3053 * Also, frames with less than 16 bytes are dropped.
3054 */
3055 skb = ieee80211_rx_monitor(local, skb, rate);
3056 if (!skb) {
3057 rcu_read_unlock();
3058 return;
3059 }
3060
3061 ieee80211_tpt_led_trig_rx(local,
3062 ((struct ieee80211_hdr *)skb->data)->frame_control,
3063 skb->len);
3064 __ieee80211_rx_handle_packet(hw, skb);
3065
3066 rcu_read_unlock();
3067
3068 return;
3069 drop:
3070 kfree_skb(skb);
3071 }
3072 EXPORT_SYMBOL(ieee80211_rx);
3073
3074 /* This is a version of the rx handler that can be called from hard irq
3075 * context. Post the skb on the queue and schedule the tasklet */
3076 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3077 {
3078 struct ieee80211_local *local = hw_to_local(hw);
3079
3080 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3081
3082 skb->pkt_type = IEEE80211_RX_MSG;
3083 skb_queue_tail(&local->skb_queue, skb);
3084 tasklet_schedule(&local->tasklet);
3085 }
3086 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
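/*
 * Illustrative driver-side sketch (not part of mac80211 itself): a low
 * level driver typically fills in the RX status stored in the skb
 * control buffer and then hands the frame over, roughly like this:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = IEEE80211_BAND_2GHZ;
 *	status->freq = 2412;		   channel 1, in MHz
 *	status->rate_idx = 0;		   index into the band's bitrates
 *	status->signal = -55;		   dBm if IEEE80211_HW_SIGNAL_DBM is set
 *	ieee80211_rx_irqsafe(hw, skb);	   or ieee80211_rx() from a tasklet
 *
 * Which fields are mandatory depends on the hardware flags the driver
 * advertises; treat this only as a sketch, not a complete receive path.
 */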