mac80211: fix PS-poll response, race
[deliverable/linux.git] net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
29
30 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx,
32 struct sk_buff *skb,
33 u16 mpdu_seq_num,
34 int bar_req);
35 /*
36 * monitor mode reception
37 *
38 * This function cleans up the SKB, i.e. it removes all the stuff
39 * only useful for monitoring.
40 */
41 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
42 struct sk_buff *skb,
43 int rtap_len)
44 {
45 skb_pull(skb, rtap_len);
46
47 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
48 if (likely(skb->len > FCS_LEN))
49 skb_trim(skb, skb->len - FCS_LEN);
50 else {
51 /* driver bug */
52 WARN_ON(1);
53 dev_kfree_skb(skb);
54 skb = NULL;
55 }
56 }
57
58 return skb;
59 }
60
61 static inline int should_drop_frame(struct sk_buff *skb,
62 int present_fcs_len,
63 int radiotap_len)
64 {
65 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
66 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
67
68 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
69 return 1;
70 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
71 return 1;
72 if (ieee80211_is_ctl(hdr->frame_control) &&
73 !ieee80211_is_pspoll(hdr->frame_control) &&
74 !ieee80211_is_back_req(hdr->frame_control))
75 return 1;
76 return 0;
77 }
78
79 static int
80 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
81 struct ieee80211_rx_status *status)
82 {
83 int len;
84
85 /* always present fields */
86 len = sizeof(struct ieee80211_radiotap_header) + 9;
87
88 if (status->flag & RX_FLAG_TSFT)
89 len += 8;
90 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
91 len += 1;
92 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
93 len += 1;
94
95 if (len & 1) /* padding for RX_FLAGS if necessary */
96 len++;
97
98 /* make sure radiotap starts at a naturally aligned address */
99 if (len % 8)
100 len = roundup(len, 8);
101
102 return len;
103 }
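/*
 * Worked example for the length computed above (assuming hardware that
 * sets RX_FLAG_TSFT and reports both signal and noise in dBm): the "+ 9"
 * covers the always-present fields -- flags (1), rate (1, written even
 * for HT frames), channel (4), antenna (1) and RX flags (2).
 *
 *   8 (radiotap header) + 9               = 17
 *   + 8 (TSFT)                            = 25
 *   + 1 (dBm antenna signal)              = 26
 *   + 1 (dBm antenna noise)               = 27
 *   padded to an even length for RX_FLAGS = 28
 *   rounded up to a multiple of eight     = 32 bytes of needed headroom
 */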
104
105 /*
106 * ieee80211_add_rx_radiotap_header - add radiotap header
107 *
108 * add a radiotap header containing all the fields which the hardware provided.
109 */
110 static void
111 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
112 struct sk_buff *skb,
113 struct ieee80211_rate *rate,
114 int rtap_len)
115 {
116 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
117 struct ieee80211_radiotap_header *rthdr;
118 unsigned char *pos;
119
120 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
121 memset(rthdr, 0, rtap_len);
122
123 /* radiotap header, set always present flags */
124 rthdr->it_present =
125 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
147 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
148 if (status->flag & RX_FLAG_SHORTPRE)
149 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
150 pos++;
151
152 /* IEEE80211_RADIOTAP_RATE */
153 if (status->flag & RX_FLAG_HT) {
154 /*
155 * TODO: add following information into radiotap header once
156 * suitable fields are defined for it:
157 * - MCS index (status->rate_idx)
158 * - HT40 (status->flag & RX_FLAG_40MHZ)
159 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
160 */
161 *pos = 0;
162 } else {
163 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
164 *pos = rate->bitrate / 5;
165 }
166 pos++;
167
168 /* IEEE80211_RADIOTAP_CHANNEL */
169 *(__le16 *)pos = cpu_to_le16(status->freq);
170 pos += 2;
171 if (status->band == IEEE80211_BAND_5GHZ)
172 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
173 IEEE80211_CHAN_5GHZ);
174 else if (rate->flags & IEEE80211_RATE_ERP_G)
175 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
176 IEEE80211_CHAN_2GHZ);
177 else
178 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
179 IEEE80211_CHAN_2GHZ);
180 pos += 2;
181
182 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
183 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
184 *pos = status->signal;
185 rthdr->it_present |=
186 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
187 pos++;
188 }
189
190 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
191 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
192 *pos = status->noise;
193 rthdr->it_present |=
194 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
195 pos++;
196 }
197
198 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
199
200 /* IEEE80211_RADIOTAP_ANTENNA */
201 *pos = status->antenna;
202 pos++;
203
204 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
205
206 /* IEEE80211_RADIOTAP_RX_FLAGS */
207 /* ensure 2 byte alignment for the 2 byte field as required */
208 if ((pos - (unsigned char *)rthdr) & 1)
209 pos++;
210 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
211 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
212 pos += 2;
213 }
214
215 /*
216 * This function copies a received frame to all monitor interfaces and
217 * returns a cleaned-up SKB that no longer includes the FCS nor the
218 * radiotap header the driver might have added.
219 */
220 static struct sk_buff *
221 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
222 struct ieee80211_rate *rate)
223 {
224 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
225 struct ieee80211_sub_if_data *sdata;
226 int needed_headroom = 0;
227 struct sk_buff *skb, *skb2;
228 struct net_device *prev_dev = NULL;
229 int present_fcs_len = 0;
230 int rtap_len = 0;
231
232 /*
233 * First, we may need to make a copy of the skb because
234 * (1) we need to modify it for radiotap (if not present), and
235 * (2) the other RX handlers will modify the skb we got.
236 *
237 * We don't need to, of course, if we aren't going to return
238 * the SKB because it has a bad FCS/PLCP checksum.
239 */
240 if (status->flag & RX_FLAG_RADIOTAP)
241 rtap_len = ieee80211_get_radiotap_len(origskb->data);
242 else
243 /* room for the radiotap header based on driver features */
244 needed_headroom = ieee80211_rx_radiotap_len(local, status);
245
246 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
247 present_fcs_len = FCS_LEN;
248
249 if (!local->monitors) {
250 if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
251 dev_kfree_skb(origskb);
252 return NULL;
253 }
254
255 return remove_monitor_info(local, origskb, rtap_len);
256 }
257
258 if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
259 /* only need to expand headroom if necessary */
260 skb = origskb;
261 origskb = NULL;
262
263 /*
264 * This shouldn't trigger often because most devices have an
265 * RX header they pull before we get here, and that should
266 * be big enough for our radiotap information. We should
267 * probably export the length to drivers so that we can have
268 * them allocate enough headroom to start with.
269 */
270 if (skb_headroom(skb) < needed_headroom &&
271 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
272 dev_kfree_skb(skb);
273 return NULL;
274 }
275 } else {
276 /*
277 * Need to make a copy and possibly remove radiotap header
278 * and FCS from the original.
279 */
280 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
281
282 origskb = remove_monitor_info(local, origskb, rtap_len);
283
284 if (!skb)
285 return origskb;
286 }
287
288 /* if necessary, prepend radiotap information */
289 if (!(status->flag & RX_FLAG_RADIOTAP))
290 ieee80211_add_rx_radiotap_header(local, skb, rate,
291 needed_headroom);
292
293 skb_reset_mac_header(skb);
294 skb->ip_summed = CHECKSUM_UNNECESSARY;
295 skb->pkt_type = PACKET_OTHERHOST;
296 skb->protocol = htons(ETH_P_802_2);
297
298 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
299 if (!netif_running(sdata->dev))
300 continue;
301
302 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
303 continue;
304
305 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
306 continue;
307
308 if (prev_dev) {
309 skb2 = skb_clone(skb, GFP_ATOMIC);
310 if (skb2) {
311 skb2->dev = prev_dev;
312 netif_rx(skb2);
313 }
314 }
315
316 prev_dev = sdata->dev;
317 sdata->dev->stats.rx_packets++;
318 sdata->dev->stats.rx_bytes += skb->len;
319 }
320
321 if (prev_dev) {
322 skb->dev = prev_dev;
323 netif_rx(skb);
324 } else
325 dev_kfree_skb(skb);
326
327 return origskb;
328 }
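/*
 * As the comment in ieee80211_rx_monitor() notes, drivers ideally allocate
 * enough RX headroom up front so that pskb_expand_head() is never needed
 * here.  A minimal driver-side sketch; the 64-byte figure and the
 * my_driver_rx_alloc() name are illustrative assumptions, not mac80211 API.
 */
static struct sk_buff *my_driver_rx_alloc(unsigned int frame_len)
{
	/* assumed upper bound for the radiotap header mac80211 prepends */
	const unsigned int radiotap_headroom = 64;
	struct sk_buff *skb;

	skb = dev_alloc_skb(radiotap_headroom + frame_len);
	if (!skb)
		return NULL;

	/* keep the extra space as headroom in front of the 802.11 frame */
	skb_reserve(skb, radiotap_headroom);
	return skb;
}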
329
330
331 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
332 {
333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
334 int tid;
335
336 /* does the frame have a qos control field? */
337 if (ieee80211_is_data_qos(hdr->frame_control)) {
338 u8 *qc = ieee80211_get_qos_ctl(hdr);
339 /* frame has qos control */
340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
341 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
342 rx->flags |= IEEE80211_RX_AMSDU;
343 else
344 rx->flags &= ~IEEE80211_RX_AMSDU;
345 } else {
346 /*
347 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
348 *
349 * Sequence numbers for management frames, QoS data
350 * frames with a broadcast/multicast address in the
351 * Address 1 field, and all non-QoS data frames sent
352 * by QoS STAs are assigned using an additional single
353 * modulo-4096 counter, [...]
354 *
355 * We also use that counter for non-QoS STAs.
356 */
357 tid = NUM_RX_DATA_QUEUES - 1;
358 }
359
360 rx->queue = tid;
361 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
362 * For now, set skb->priority to 0 for other cases. */
363 rx->skb->priority = (tid > 7) ? 0 : tid;
364 }
365
366 /**
367 * DOC: Packet alignment
368 *
369 * Drivers always need to pass packets that are aligned to two-byte boundaries
370 * to the stack.
371 *
372 * Additionally, drivers should, if possible, align the payload data in a way that
373 * guarantees that the contained IP header is aligned to a four-byte
374 * boundary. In the case of regular frames, this simply means aligning the
375 * payload to a four-byte boundary (because either the IP header is directly
376 * contained, or IV/RFC1042 headers that have a length divisible by four are
377 * in front of it).
378 *
379 * With A-MSDU frames, however, the payload data address must yield two modulo
380 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
381 * push the IP header further back to a multiple of four again. Thankfully, the
382 * specs were sane enough this time around to require padding each A-MSDU
383 * subframe to a length that is a multiple of four.
384 *
385 * Padding such as Atheros hardware adds in between the 802.11 header and
386 * the payload is not supported; the driver is required to move the 802.11
387 * header so that it sits directly in front of the payload in that case.
388 */
389 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
390 {
391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
392 int hdrlen;
393
394 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
395 return;
396 #endif
397
398 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
399 "unaligned packet at 0x%p\n", rx->skb->data))
400 return;
401
402 if (!ieee80211_is_data_present(hdr->frame_control))
403 return;
404
405 hdrlen = ieee80211_hdrlen(hdr->frame_control);
406 if (rx->flags & IEEE80211_RX_AMSDU)
407 hdrlen += ETH_HLEN;
408 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
409 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
410 }
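/*
 * The "DOC: Packet alignment" text above requires that drivers not leave
 * hardware padding between the 802.11 header and the payload.  A minimal
 * sketch of what a driver might do before handing such a frame to
 * mac80211; my_driver_strip_rx_pad() is a hypothetical helper and "pad"
 * is the number of padding bytes the hardware inserted.
 */
static void my_driver_strip_rx_pad(struct sk_buff *skb, unsigned int pad)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (!pad)
		return;

	/* slide the 802.11 header up so it sits directly before the payload */
	memmove(skb->data + pad, skb->data, hdrlen);
	skb_pull(skb, pad);
}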
411
412
413 /* rx handlers */
414
415 static ieee80211_rx_result debug_noinline
416 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
417 {
418 struct ieee80211_local *local = rx->local;
419 struct sk_buff *skb = rx->skb;
420
421 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
422 return ieee80211_scan_rx(rx->sdata, skb);
423
424 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
425 (rx->flags & IEEE80211_RX_IN_SCAN))) {
426 /* drop all the other packets during a software scan anyway */
427 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
428 dev_kfree_skb(skb);
429 return RX_QUEUED;
430 }
431
432 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
433 /* scanning finished during invoking of handlers */
434 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
435 return RX_DROP_UNUSABLE;
436 }
437
438 return RX_CONTINUE;
439 }
440
441
442 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
443 {
444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
447 return 0;
448
449 return ieee80211_is_robust_mgmt_frame(hdr);
450 }
451
452
453 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
454 {
455 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
456
457 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
458 return 0;
459
460 return ieee80211_is_robust_mgmt_frame(hdr);
461 }
462
463
464 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
465 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
466 {
467 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
468 struct ieee80211_mmie *mmie;
469
470 if (skb->len < 24 + sizeof(*mmie) ||
471 !is_multicast_ether_addr(hdr->da))
472 return -1;
473
474 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
475 return -1; /* not a robust management frame */
476
477 mmie = (struct ieee80211_mmie *)
478 (skb->data + skb->len - sizeof(*mmie));
479 if (mmie->element_id != WLAN_EID_MMIE ||
480 mmie->length != sizeof(*mmie) - 2)
481 return -1;
482
483 return le16_to_cpu(mmie->key_id);
484 }
485
486
487 static ieee80211_rx_result
488 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
489 {
490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
491 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
492
493 if (ieee80211_is_data(hdr->frame_control)) {
494 if (!ieee80211_has_a4(hdr->frame_control))
495 return RX_DROP_MONITOR;
496 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
497 return RX_DROP_MONITOR;
498 }
499
500 /* If there is not an established peer link and this is not a peer link
501 * establishment frame, beacon or probe, drop the frame.
502 */
503
504 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
505 struct ieee80211_mgmt *mgmt;
506
507 if (!ieee80211_is_mgmt(hdr->frame_control))
508 return RX_DROP_MONITOR;
509
510 if (ieee80211_is_action(hdr->frame_control)) {
511 mgmt = (struct ieee80211_mgmt *)hdr;
512 if (mgmt->u.action.category != PLINK_CATEGORY)
513 return RX_DROP_MONITOR;
514 return RX_CONTINUE;
515 }
516
517 if (ieee80211_is_probe_req(hdr->frame_control) ||
518 ieee80211_is_probe_resp(hdr->frame_control) ||
519 ieee80211_is_beacon(hdr->frame_control))
520 return RX_CONTINUE;
521
522 return RX_DROP_MONITOR;
523
524 }
525
526 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
527
528 if (ieee80211_is_data(hdr->frame_control) &&
529 is_multicast_ether_addr(hdr->addr1) &&
530 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
531 return RX_DROP_MONITOR;
532 #undef msh_h_get
533
534 return RX_CONTINUE;
535 }
536
537
538 static ieee80211_rx_result debug_noinline
539 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
540 {
541 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
542
543 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
544 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
545 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
546 rx->sta->last_seq_ctrl[rx->queue] ==
547 hdr->seq_ctrl)) {
548 if (rx->flags & IEEE80211_RX_RA_MATCH) {
549 rx->local->dot11FrameDuplicateCount++;
550 rx->sta->num_duplicates++;
551 }
552 return RX_DROP_MONITOR;
553 } else
554 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
555 }
556
557 if (unlikely(rx->skb->len < 16)) {
558 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
559 return RX_DROP_MONITOR;
560 }
561
562 /* Drop disallowed frame classes based on STA auth/assoc state;
563 * IEEE 802.11, Chap 5.5.
564 *
565 * mac80211 filters only based on association state, i.e. it drops
566 * Class 3 frames from not associated stations. hostapd sends
567 * deauth/disassoc frames when needed. In addition, hostapd is
568 * responsible for filtering on both auth and assoc states.
569 */
570
571 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
572 return ieee80211_rx_mesh_check(rx);
573
574 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
575 ieee80211_is_pspoll(hdr->frame_control)) &&
576 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
577 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
578 if ((!ieee80211_has_fromds(hdr->frame_control) &&
579 !ieee80211_has_tods(hdr->frame_control) &&
580 ieee80211_is_data(hdr->frame_control)) ||
581 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
582 /* Drop IBSS frames and frames for other hosts
583 * silently. */
584 return RX_DROP_MONITOR;
585 }
586
587 return RX_DROP_MONITOR;
588 }
589
590 return RX_CONTINUE;
591 }
592
593
594 static ieee80211_rx_result debug_noinline
595 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
596 {
597 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
598 int keyidx;
599 int hdrlen;
600 ieee80211_rx_result result = RX_DROP_UNUSABLE;
601 struct ieee80211_key *stakey = NULL;
602 int mmie_keyidx = -1;
603
604 /*
605 * Key selection 101
606 *
607 * There are four types of keys:
608 * - GTK (group keys)
609 * - IGTK (group keys for management frames)
610 * - PTK (pairwise keys)
611 * - STK (station-to-station pairwise keys)
612 *
613 * When selecting a key, we have to distinguish between multicast
614 * (including broadcast) and unicast frames, the latter can only
615 * use PTKs and STKs while the former always use GTKs and IGTKs.
616 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
617 * unicast frames can also use key indices like GTKs. Hence, if we
618 * don't have a PTK/STK we check the key index for a WEP key.
619 *
620 * Note that in a regular BSS, multicast frames are sent by the
621 * AP only, associated stations unicast the frame to the AP first
622 * which then multicasts it on their behalf.
623 *
624 * There is also a slight problem in IBSS mode: GTKs are negotiated
625 * with each station, that is something we don't currently handle.
626 * The spec seems to expect that one negotiates the same key with
627 * every station but there's no such requirement; VLANs could be
628 * possible.
629 */
630
631 /*
632 * No point in finding a key and decrypting if the frame is neither
633 * addressed to us nor a multicast frame.
634 */
635 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
636 return RX_CONTINUE;
637
638 if (rx->sta)
639 stakey = rcu_dereference(rx->sta->key);
640
641 if (!ieee80211_has_protected(hdr->frame_control))
642 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
643
644 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
645 rx->key = stakey;
646 /* Skip decryption if the frame is not protected. */
647 if (!ieee80211_has_protected(hdr->frame_control))
648 return RX_CONTINUE;
649 } else if (mmie_keyidx >= 0) {
650 /* Broadcast/multicast robust management frame / BIP */
651 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
652 (rx->status->flag & RX_FLAG_IV_STRIPPED))
653 return RX_CONTINUE;
654
655 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
656 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
657 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
658 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
659 } else if (!ieee80211_has_protected(hdr->frame_control)) {
660 /*
661 * The frame was not protected, so skip decryption. However, we
662 * need to set rx->key if there is a key that could have been
663 * used so that the frame may be dropped if encryption would
664 * have been expected.
665 */
666 struct ieee80211_key *key = NULL;
667 if (ieee80211_is_mgmt(hdr->frame_control) &&
668 is_multicast_ether_addr(hdr->addr1) &&
669 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
670 rx->key = key;
671 else if ((key = rcu_dereference(rx->sdata->default_key)))
672 rx->key = key;
673 return RX_CONTINUE;
674 } else {
675 /*
676 * The device doesn't give us the IV so we won't be
677 * able to look up the key. That's ok though, we
678 * don't need to decrypt the frame, we just won't
679 * be able to keep statistics accurate.
680 * Except for key threshold notifications, should
681 * we somehow allow the driver to tell us which key
682 * the hardware used if this flag is set?
683 */
684 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
685 (rx->status->flag & RX_FLAG_IV_STRIPPED))
686 return RX_CONTINUE;
687
688 hdrlen = ieee80211_hdrlen(hdr->frame_control);
689
690 if (rx->skb->len < 8 + hdrlen)
691 return RX_DROP_UNUSABLE; /* TODO: count this? */
692
693 /*
694 * no need to call ieee80211_wep_get_keyidx,
695 * it verifies a bunch of things we've done already
696 */
697 keyidx = rx->skb->data[hdrlen + 3] >> 6;
698
699 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
700
701 /*
702 * RSNA-protected unicast frames should always be sent with
703 * pairwise or station-to-station keys, but for WEP we allow
704 * using a key index as well.
705 */
706 if (rx->key && rx->key->conf.alg != ALG_WEP &&
707 !is_multicast_ether_addr(hdr->addr1))
708 rx->key = NULL;
709 }
710
711 if (rx->key) {
712 rx->key->tx_rx_count++;
713 /* TODO: add threshold stuff again */
714 } else {
715 return RX_DROP_MONITOR;
716 }
717
718 /* Check for weak IVs if possible */
719 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
720 ieee80211_is_data(hdr->frame_control) &&
721 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
722 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
723 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
724 rx->sta->wep_weak_iv_count++;
725
726 switch (rx->key->conf.alg) {
727 case ALG_WEP:
728 result = ieee80211_crypto_wep_decrypt(rx);
729 break;
730 case ALG_TKIP:
731 result = ieee80211_crypto_tkip_decrypt(rx);
732 break;
733 case ALG_CCMP:
734 result = ieee80211_crypto_ccmp_decrypt(rx);
735 break;
736 case ALG_AES_CMAC:
737 result = ieee80211_crypto_aes_cmac_decrypt(rx);
738 break;
739 }
740
741 /* either the frame has been decrypted or will be dropped */
742 rx->status->flag |= RX_FLAG_DECRYPTED;
743
744 return result;
745 }
746
747 static ieee80211_rx_result debug_noinline
748 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
749 {
750 struct ieee80211_local *local;
751 struct ieee80211_hdr *hdr;
752 struct sk_buff *skb;
753
754 local = rx->local;
755 skb = rx->skb;
756 hdr = (struct ieee80211_hdr *) skb->data;
757
758 if (!local->pspolling)
759 return RX_CONTINUE;
760
761 if (!ieee80211_has_fromds(hdr->frame_control))
762 /* this is not from AP */
763 return RX_CONTINUE;
764
765 if (!ieee80211_is_data(hdr->frame_control))
766 return RX_CONTINUE;
767
768 if (!ieee80211_has_moredata(hdr->frame_control)) {
769 /* AP has no more frames buffered for us */
770 local->pspolling = false;
771 return RX_CONTINUE;
772 }
773
774 /* more data bit is set, let's request a new frame from the AP */
775 ieee80211_send_pspoll(local, rx->sdata);
776
777 return RX_CONTINUE;
778 }
779
780 static void ap_sta_ps_start(struct sta_info *sta)
781 {
782 struct ieee80211_sub_if_data *sdata = sta->sdata;
783 struct ieee80211_local *local = sdata->local;
784
785 atomic_inc(&sdata->bss->num_sta_ps);
786 set_sta_flags(sta, WLAN_STA_PS);
787 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
788 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
789 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
790 sdata->dev->name, sta->sta.addr, sta->sta.aid);
791 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
792 }
793
794 static int ap_sta_ps_end(struct sta_info *sta)
795 {
796 struct ieee80211_sub_if_data *sdata = sta->sdata;
797 struct ieee80211_local *local = sdata->local;
798 int sent, buffered;
799
800 atomic_dec(&sdata->bss->num_sta_ps);
801
802 clear_sta_flags(sta, WLAN_STA_PS);
803 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
804
805 if (!skb_queue_empty(&sta->ps_tx_buf))
806 sta_info_clear_tim_bit(sta);
807
808 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
809 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
810 sdata->dev->name, sta->sta.addr, sta->sta.aid);
811 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
812
813 /* Send all buffered frames to the station */
814 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
815 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
816 sent += buffered;
817 local->total_ps_buffered -= buffered;
818
819 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
820 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
821 "since STA not sleeping anymore\n", sdata->dev->name,
822 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
823 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
824
825 return sent;
826 }
827
828 static ieee80211_rx_result debug_noinline
829 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
830 {
831 struct sta_info *sta = rx->sta;
832 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
833
834 if (!sta)
835 return RX_CONTINUE;
836
837 /*
838 * Update last_rx only for IBSS packets which are for the current
839 * BSSID to avoid keeping the current IBSS network alive in cases
840 * where other STAs start using different BSSID.
841 */
842 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
843 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
844 NL80211_IFTYPE_ADHOC);
845 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
846 sta->last_rx = jiffies;
847 } else if (!is_multicast_ether_addr(hdr->addr1)) {
848 /*
849 * Mesh beacons will update last_rx if they are found to
850 * match the current local configuration when processed.
851 */
852 sta->last_rx = jiffies;
853 }
854
855 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
856 return RX_CONTINUE;
857
858 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
859 ieee80211_sta_rx_notify(rx->sdata, hdr);
860
861 sta->rx_fragments++;
862 sta->rx_bytes += rx->skb->len;
863 sta->last_signal = rx->status->signal;
864 sta->last_qual = rx->status->qual;
865 sta->last_noise = rx->status->noise;
866
867 /*
868 * Change STA power saving mode only at the end of a frame
869 * exchange sequence.
870 */
871 if (!ieee80211_has_morefrags(hdr->frame_control) &&
872 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
873 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
874 if (test_sta_flags(sta, WLAN_STA_PS)) {
875 /*
876 * Ignore doze->wake transitions that are
877 * indicated by non-data frames, the standard
878 * is unclear here, but for example going to
879 * PS mode and then scanning would cause a
880 * doze->wake transition for the probe request,
881 * and that is clearly undesirable.
882 */
883 if (ieee80211_is_data(hdr->frame_control) &&
884 !ieee80211_has_pm(hdr->frame_control))
885 rx->sent_ps_buffered += ap_sta_ps_end(sta);
886 } else {
887 if (ieee80211_has_pm(hdr->frame_control))
888 ap_sta_ps_start(sta);
889 }
890 }
891
892 /* Drop data::nullfunc frames silently, since they are used only to
893 * control station power saving mode. */
894 if (ieee80211_is_nullfunc(hdr->frame_control)) {
895 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
896 /* Update counter and free packet here to avoid counting this
897 * as a dropped packet. */
898 sta->rx_packets++;
899 dev_kfree_skb(rx->skb);
900 return RX_QUEUED;
901 }
902
903 return RX_CONTINUE;
904 } /* ieee80211_rx_h_sta_process */
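/*
 * The doze->wake rule applied above, pulled out as a hypothetical
 * predicate for clarity (not an existing mac80211 helper): only a data
 * frame with the PM bit cleared may take a station out of power save,
 * so e.g. probe requests sent while scanning do not wake it.
 */
static bool sta_frame_clears_ps(struct ieee80211_hdr *hdr)
{
	return ieee80211_is_data(hdr->frame_control) &&
	       !ieee80211_has_pm(hdr->frame_control);
}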
905
906 static inline struct ieee80211_fragment_entry *
907 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
908 unsigned int frag, unsigned int seq, int rx_queue,
909 struct sk_buff **skb)
910 {
911 struct ieee80211_fragment_entry *entry;
912 int idx;
913
914 idx = sdata->fragment_next;
915 entry = &sdata->fragments[sdata->fragment_next++];
916 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
917 sdata->fragment_next = 0;
918
919 if (!skb_queue_empty(&entry->skb_list)) {
920 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
921 struct ieee80211_hdr *hdr =
922 (struct ieee80211_hdr *) entry->skb_list.next->data;
923 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
924 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
925 "addr1=%pM addr2=%pM\n",
926 sdata->dev->name, idx,
927 jiffies - entry->first_frag_time, entry->seq,
928 entry->last_frag, hdr->addr1, hdr->addr2);
929 #endif
930 __skb_queue_purge(&entry->skb_list);
931 }
932
933 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
934 *skb = NULL;
935 entry->first_frag_time = jiffies;
936 entry->seq = seq;
937 entry->rx_queue = rx_queue;
938 entry->last_frag = frag;
939 entry->ccmp = 0;
940 entry->extra_len = 0;
941
942 return entry;
943 }
944
945 static inline struct ieee80211_fragment_entry *
946 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
947 unsigned int frag, unsigned int seq,
948 int rx_queue, struct ieee80211_hdr *hdr)
949 {
950 struct ieee80211_fragment_entry *entry;
951 int i, idx;
952
953 idx = sdata->fragment_next;
954 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
955 struct ieee80211_hdr *f_hdr;
956
957 idx--;
958 if (idx < 0)
959 idx = IEEE80211_FRAGMENT_MAX - 1;
960
961 entry = &sdata->fragments[idx];
962 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
963 entry->rx_queue != rx_queue ||
964 entry->last_frag + 1 != frag)
965 continue;
966
967 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
968
969 /*
970 * Check ftype and addresses are equal, else check next fragment
971 */
972 if (((hdr->frame_control ^ f_hdr->frame_control) &
973 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
974 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
975 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
976 continue;
977
978 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
979 __skb_queue_purge(&entry->skb_list);
980 continue;
981 }
982 return entry;
983 }
984
985 return NULL;
986 }
987
988 static ieee80211_rx_result debug_noinline
989 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
990 {
991 struct ieee80211_hdr *hdr;
992 u16 sc;
993 __le16 fc;
994 unsigned int frag, seq;
995 struct ieee80211_fragment_entry *entry;
996 struct sk_buff *skb;
997
998 hdr = (struct ieee80211_hdr *)rx->skb->data;
999 fc = hdr->frame_control;
1000 sc = le16_to_cpu(hdr->seq_ctrl);
1001 frag = sc & IEEE80211_SCTL_FRAG;
1002
1003 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1004 (rx->skb)->len < 24 ||
1005 is_multicast_ether_addr(hdr->addr1))) {
1006 /* not fragmented */
1007 goto out;
1008 }
1009 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1010
1011 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1012
1013 if (frag == 0) {
1014 /* This is the first fragment of a new frame. */
1015 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1016 rx->queue, &(rx->skb));
1017 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1018 ieee80211_has_protected(fc)) {
1019 /* Store CCMP PN so that we can verify that the next
1020 * fragment has a sequential PN value. */
1021 entry->ccmp = 1;
1022 memcpy(entry->last_pn,
1023 rx->key->u.ccmp.rx_pn[rx->queue],
1024 CCMP_PN_LEN);
1025 }
1026 return RX_QUEUED;
1027 }
1028
1029 /* This is a fragment for a frame that should already be pending in
1030 * fragment cache. Add this fragment to the end of the pending entry.
1031 */
1032 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1033 if (!entry) {
1034 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1035 return RX_DROP_MONITOR;
1036 }
1037
1038 /* Verify that MPDUs within one MSDU have sequential PN values.
1039 * (IEEE 802.11i, 8.3.3.4.5) */
1040 if (entry->ccmp) {
1041 int i;
1042 u8 pn[CCMP_PN_LEN], *rpn;
1043 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1044 return RX_DROP_UNUSABLE;
1045 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1046 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1047 pn[i]++;
1048 if (pn[i])
1049 break;
1050 }
1051 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1052 if (memcmp(pn, rpn, CCMP_PN_LEN))
1053 return RX_DROP_UNUSABLE;
1054 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1055 }
1056
1057 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1058 __skb_queue_tail(&entry->skb_list, rx->skb);
1059 entry->last_frag = frag;
1060 entry->extra_len += rx->skb->len;
1061 if (ieee80211_has_morefrags(fc)) {
1062 rx->skb = NULL;
1063 return RX_QUEUED;
1064 }
1065
1066 rx->skb = __skb_dequeue(&entry->skb_list);
1067 if (skb_tailroom(rx->skb) < entry->extra_len) {
1068 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1069 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1070 GFP_ATOMIC))) {
1071 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1072 __skb_queue_purge(&entry->skb_list);
1073 return RX_DROP_UNUSABLE;
1074 }
1075 }
1076 while ((skb = __skb_dequeue(&entry->skb_list))) {
1077 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1078 dev_kfree_skb(skb);
1079 }
1080
1081 /* Complete frame has been reassembled - process it now */
1082 rx->flags |= IEEE80211_RX_FRAGMENTED;
1083
1084 out:
1085 if (rx->sta)
1086 rx->sta->rx_packets++;
1087 if (is_multicast_ether_addr(hdr->addr1))
1088 rx->local->dot11MulticastReceivedFrameCount++;
1089 else
1090 ieee80211_led_rx(rx->local);
1091 return RX_CONTINUE;
1092 }
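/*
 * The PN check above treats the 6-byte CCMP packet number as a big-endian
 * counter.  A hypothetical helper that makes the "expected next PN" step
 * explicit (ccmp_pn_next() is not an existing mac80211 function, it only
 * restates the loop used in ieee80211_rx_h_defragment()):
 */
static void ccmp_pn_next(u8 next[CCMP_PN_LEN], const u8 last[CCMP_PN_LEN])
{
	int i;

	memcpy(next, last, CCMP_PN_LEN);
	for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
		if (++next[i])	/* stop unless this byte wrapped to zero */
			break;
	}
}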
1093
1094 static ieee80211_rx_result debug_noinline
1095 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1096 {
1097 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1098 struct sk_buff *skb;
1099 int no_pending_pkts;
1100 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1101
1102 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1103 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1104 return RX_CONTINUE;
1105
1106 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1107 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1108 return RX_DROP_UNUSABLE;
1109
1110 skb = skb_dequeue(&rx->sta->tx_filtered);
1111 if (!skb) {
1112 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1113 if (skb)
1114 rx->local->total_ps_buffered--;
1115 }
1116 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1117 skb_queue_empty(&rx->sta->ps_tx_buf);
1118
1119 if (skb) {
1120 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1121 struct ieee80211_hdr *hdr =
1122 (struct ieee80211_hdr *) skb->data;
1123
1124 /*
1125 * Tell TX path to send this frame even though the STA may
1126 * still remain in PS mode after this frame exchange.
1127 */
1128 info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
1129
1130 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1131 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1132 rx->sta->sta.addr, rx->sta->sta.aid,
1133 skb_queue_len(&rx->sta->ps_tx_buf));
1134 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1135
1136 /* Use MoreData flag to indicate whether there are more
1137 * buffered frames for this STA */
1138 if (no_pending_pkts)
1139 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1140 else
1141 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1142
1143 ieee80211_add_pending_skb(rx->local, skb);
1144
1145 if (no_pending_pkts)
1146 sta_info_clear_tim_bit(rx->sta);
1147 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1148 } else if (!rx->sent_ps_buffered) {
1149 /*
1150 * FIXME: This can be the result of a race condition between
1151 * us expiring a frame and the station polling for it.
1152 * Should we send it a null-func frame indicating we
1153 * have nothing buffered for it?
1154 */
1155 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1156 "though there are no buffered frames for it\n",
1157 rx->dev->name, rx->sta->sta.addr);
1158 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1159 }
1160
1161 /* Free PS Poll skb here instead of returning RX_DROP that would
1162 * count as a dropped frame. */
1163 dev_kfree_skb(rx->skb);
1164
1165 return RX_QUEUED;
1166 }
1167
1168 static ieee80211_rx_result debug_noinline
1169 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1170 {
1171 u8 *data = rx->skb->data;
1172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1173
1174 if (!ieee80211_is_data_qos(hdr->frame_control))
1175 return RX_CONTINUE;
1176
1177 /* remove the qos control field, update frame type and meta-data */
1178 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1179 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1180 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1181 /* change frame type to non QOS */
1182 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1183
1184 return RX_CONTINUE;
1185 }
1186
1187 static int
1188 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1189 {
1190 if (unlikely(!rx->sta ||
1191 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1192 return -EACCES;
1193
1194 return 0;
1195 }
1196
1197 static int
1198 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1199 {
1200 /*
1201 * Pass through unencrypted frames if the hardware has
1202 * decrypted them already.
1203 */
1204 if (rx->status->flag & RX_FLAG_DECRYPTED)
1205 return 0;
1206
1207 /* Drop unencrypted frames if key is set. */
1208 if (unlikely(!ieee80211_has_protected(fc) &&
1209 !ieee80211_is_nullfunc(fc) &&
1210 ieee80211_is_data(fc) &&
1211 (rx->key || rx->sdata->drop_unencrypted)))
1212 return -EACCES;
1213 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1214 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1215 rx->key))
1216 return -EACCES;
1217 /* BIP does not use Protected field, so need to check MMIE */
1218 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1219 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1220 rx->key))
1221 return -EACCES;
1222 /*
1223 * When using MFP, Action frames are not allowed prior to
1224 * having configured keys.
1225 */
1226 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1227 ieee80211_is_robust_mgmt_frame(
1228 (struct ieee80211_hdr *) rx->skb->data)))
1229 return -EACCES;
1230 }
1231
1232 return 0;
1233 }
1234
1235 static int
1236 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1237 {
1238 struct net_device *dev = rx->dev;
1239 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1240
1241 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1242 }
1243
1244 /*
1245 * requires that rx->skb is a frame with ethernet header
1246 */
1247 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1248 {
1249 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1250 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1251 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1252
1253 /*
1254 * Allow EAPOL frames to us/the PAE group address regardless
1255 * of whether the frame was encrypted or not.
1256 */
1257 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1258 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1259 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1260 return true;
1261
1262 if (ieee80211_802_1x_port_control(rx) ||
1263 ieee80211_drop_unencrypted(rx, fc))
1264 return false;
1265
1266 return true;
1267 }
1268
1269 /*
1270 * requires that rx->skb is a frame with ethernet header
1271 */
1272 static void
1273 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1274 {
1275 struct net_device *dev = rx->dev;
1276 struct ieee80211_local *local = rx->local;
1277 struct sk_buff *skb, *xmit_skb;
1278 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1279 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1280 struct sta_info *dsta;
1281
1282 skb = rx->skb;
1283 xmit_skb = NULL;
1284
1285 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1286 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1287 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1288 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1289 if (is_multicast_ether_addr(ehdr->h_dest)) {
1290 /*
1291 * send multicast frames both to higher layers in
1292 * local net stack and back to the wireless medium
1293 */
1294 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1295 if (!xmit_skb && net_ratelimit())
1296 printk(KERN_DEBUG "%s: failed to clone "
1297 "multicast frame\n", dev->name);
1298 } else {
1299 dsta = sta_info_get(local, skb->data);
1300 if (dsta && dsta->sdata->dev == dev) {
1301 /*
1302 * The destination station is associated to
1303 * this AP (in this VLAN), so send the frame
1304 * directly to it and do not pass it to local
1305 * net stack.
1306 */
1307 xmit_skb = skb;
1308 skb = NULL;
1309 }
1310 }
1311 }
1312
1313 if (skb) {
1314 int align __maybe_unused;
1315
1316 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1317 /*
1318 * 'align' will only take the values 0 or 2 here
1319 * since all frames are required to be aligned
1320 * to 2-byte boundaries when being passed to
1321 * mac80211. That also explains the __skb_push()
1322 * below.
1323 */
1324 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1325 if (align) {
1326 if (WARN_ON(skb_headroom(skb) < 3)) {
1327 dev_kfree_skb(skb);
1328 skb = NULL;
1329 } else {
1330 u8 *data = skb->data;
1331 size_t len = skb->len;
1332 u8 *new = __skb_push(skb, align);
1333 memmove(new, data, len);
1334 __skb_trim(skb, len);
1335 }
1336 }
1337 #endif
1338
1339 if (skb) {
1340 /* deliver to local stack */
1341 skb->protocol = eth_type_trans(skb, dev);
1342 memset(skb->cb, 0, sizeof(skb->cb));
1343 netif_rx(skb);
1344 }
1345 }
1346
1347 if (xmit_skb) {
1348 /* send to wireless media */
1349 xmit_skb->protocol = htons(ETH_P_802_3);
1350 skb_reset_network_header(xmit_skb);
1351 skb_reset_mac_header(xmit_skb);
1352 dev_queue_xmit(xmit_skb);
1353 }
1354 }
1355
1356 static ieee80211_rx_result debug_noinline
1357 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1358 {
1359 struct net_device *dev = rx->dev;
1360 struct ieee80211_local *local = rx->local;
1361 u16 ethertype;
1362 u8 *payload;
1363 struct sk_buff *skb = rx->skb, *frame = NULL;
1364 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1365 __le16 fc = hdr->frame_control;
1366 const struct ethhdr *eth;
1367 int remaining, err;
1368 u8 dst[ETH_ALEN];
1369 u8 src[ETH_ALEN];
1370
1371 if (unlikely(!ieee80211_is_data(fc)))
1372 return RX_CONTINUE;
1373
1374 if (unlikely(!ieee80211_is_data_present(fc)))
1375 return RX_DROP_MONITOR;
1376
1377 if (!(rx->flags & IEEE80211_RX_AMSDU))
1378 return RX_CONTINUE;
1379
1380 err = __ieee80211_data_to_8023(rx);
1381 if (unlikely(err))
1382 return RX_DROP_UNUSABLE;
1383
1384 skb->dev = dev;
1385
1386 dev->stats.rx_packets++;
1387 dev->stats.rx_bytes += skb->len;
1388
1389 /* skip the wrapping header */
1390 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1391 if (!eth)
1392 return RX_DROP_UNUSABLE;
1393
1394 while (skb != frame) {
1395 u8 padding;
1396 __be16 len = eth->h_proto;
1397 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1398
1399 remaining = skb->len;
1400 memcpy(dst, eth->h_dest, ETH_ALEN);
1401 memcpy(src, eth->h_source, ETH_ALEN);
1402
1403 padding = ((4 - subframe_len) & 0x3);
1404 /* the last MSDU has no padding */
1405 if (subframe_len > remaining)
1406 return RX_DROP_UNUSABLE;
1407
1408 skb_pull(skb, sizeof(struct ethhdr));
1409 /* if last subframe reuse skb */
1410 if (remaining <= subframe_len + padding)
1411 frame = skb;
1412 else {
1413 /*
1414 * Allocate and reserve two bytes more for payload
1415 * alignment since sizeof(struct ethhdr) is 14.
1416 */
1417 frame = dev_alloc_skb(
1418 ALIGN(local->hw.extra_tx_headroom, 4) +
1419 subframe_len + 2);
1420
1421 if (frame == NULL)
1422 return RX_DROP_UNUSABLE;
1423
1424 skb_reserve(frame,
1425 ALIGN(local->hw.extra_tx_headroom, 4) +
1426 sizeof(struct ethhdr) + 2);
1427 memcpy(skb_put(frame, ntohs(len)), skb->data,
1428 ntohs(len));
1429
1430 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1431 padding);
1432 if (!eth) {
1433 dev_kfree_skb(frame);
1434 return RX_DROP_UNUSABLE;
1435 }
1436 }
1437
1438 skb_reset_network_header(frame);
1439 frame->dev = dev;
1440 frame->priority = skb->priority;
1441 rx->skb = frame;
1442
1443 payload = frame->data;
1444 ethertype = (payload[6] << 8) | payload[7];
1445
1446 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1447 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1448 compare_ether_addr(payload,
1449 bridge_tunnel_header) == 0)) {
1450 /* remove RFC1042 or Bridge-Tunnel
1451 * encapsulation and replace EtherType */
1452 skb_pull(frame, 6);
1453 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1454 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1455 } else {
1456 memcpy(skb_push(frame, sizeof(__be16)),
1457 &len, sizeof(__be16));
1458 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1459 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1460 }
1461
1462 if (!ieee80211_frame_allowed(rx, fc)) {
1463 if (skb == frame) /* last frame */
1464 return RX_DROP_UNUSABLE;
1465 dev_kfree_skb(frame);
1466 continue;
1467 }
1468
1469 ieee80211_deliver_skb(rx);
1470 }
1471
1472 return RX_QUEUED;
1473 }
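/*
 * Each A-MSDU subframe parsed above is a 14-byte 802.3 header plus the
 * MSDU, padded to a multiple of four except for the last subframe (see
 * "DOC: Packet alignment").  A hypothetical helper that restates the
 * offset arithmetic used in the loop; amsdu_next_subframe_offset() does
 * not exist in mac80211.
 */
static unsigned int amsdu_next_subframe_offset(const struct ethhdr *eth)
{
	unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(eth->h_proto);

	/* every subframe but the last is padded out to a 4-byte boundary */
	return subframe_len + ((4 - subframe_len) & 0x3);
}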
1474
1475 #ifdef CONFIG_MAC80211_MESH
1476 static ieee80211_rx_result
1477 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1478 {
1479 struct ieee80211_hdr *hdr;
1480 struct ieee80211s_hdr *mesh_hdr;
1481 unsigned int hdrlen;
1482 struct sk_buff *skb = rx->skb, *fwd_skb;
1483 struct ieee80211_local *local = rx->local;
1484 struct ieee80211_sub_if_data *sdata;
1485
1486 hdr = (struct ieee80211_hdr *) skb->data;
1487 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1488 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1489 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1490
1491 if (!ieee80211_is_data(hdr->frame_control))
1492 return RX_CONTINUE;
1493
1494 if (!mesh_hdr->ttl)
1495 /* illegal frame */
1496 return RX_DROP_MONITOR;
1497
1498 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1499 struct mesh_path *mppath;
1500
1501 rcu_read_lock();
1502 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1503 if (!mppath) {
1504 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1505 } else {
1506 spin_lock_bh(&mppath->state_lock);
1507 mppath->exp_time = jiffies;
1508 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1509 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1510 spin_unlock_bh(&mppath->state_lock);
1511 }
1512 rcu_read_unlock();
1513 }
1514
1515 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1516 return RX_CONTINUE;
1517
1518 mesh_hdr->ttl--;
1519
1520 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1521 if (!mesh_hdr->ttl)
1522 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1523 dropped_frames_ttl);
1524 else {
1525 struct ieee80211_hdr *fwd_hdr;
1526 struct ieee80211_tx_info *info;
1527
1528 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1529
1530 if (!fwd_skb && net_ratelimit())
1531 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1532 rx->dev->name);
1533
1534 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1535 /*
1536 * Save TA to addr1 to send TA a path error if a
1537 * suitable next hop is not found
1538 */
1539 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1540 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1541 info = IEEE80211_SKB_CB(fwd_skb);
1542 memset(info, 0, sizeof(*info));
1543 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1544 info->control.vif = &rx->sdata->vif;
1545 ieee80211_select_queue(local, fwd_skb);
1546 if (is_multicast_ether_addr(fwd_hdr->addr3))
1547 memcpy(fwd_hdr->addr1, fwd_hdr->addr3,
1548 ETH_ALEN);
1549 else {
1550 int err = mesh_nexthop_lookup(fwd_skb, sdata);
1551 /* Failed to immediately resolve next hop:
1552 * forwarded frame was dropped or will be added
1553 * later to the pending skb queue. */
1554 if (err)
1555 return RX_DROP_MONITOR;
1556 }
1557 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1558 fwded_frames);
1559 ieee80211_add_pending_skb(local, fwd_skb);
1560 }
1561 }
1562
1563 if (is_multicast_ether_addr(hdr->addr3) ||
1564 rx->dev->flags & IFF_PROMISC)
1565 return RX_CONTINUE;
1566 else
1567 return RX_DROP_MONITOR;
1568 }
1569 #endif
1570
1571 static ieee80211_rx_result debug_noinline
1572 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1573 {
1574 struct net_device *dev = rx->dev;
1575 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1576 __le16 fc = hdr->frame_control;
1577 int err;
1578
1579 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1580 return RX_CONTINUE;
1581
1582 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1583 return RX_DROP_MONITOR;
1584
1585 err = __ieee80211_data_to_8023(rx);
1586 if (unlikely(err))
1587 return RX_DROP_UNUSABLE;
1588
1589 if (!ieee80211_frame_allowed(rx, fc))
1590 return RX_DROP_MONITOR;
1591
1592 rx->skb->dev = dev;
1593
1594 dev->stats.rx_packets++;
1595 dev->stats.rx_bytes += rx->skb->len;
1596
1597 ieee80211_deliver_skb(rx);
1598
1599 return RX_QUEUED;
1600 }
1601
1602 static ieee80211_rx_result debug_noinline
1603 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1604 {
1605 struct ieee80211_local *local = rx->local;
1606 struct ieee80211_hw *hw = &local->hw;
1607 struct sk_buff *skb = rx->skb;
1608 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1609 struct tid_ampdu_rx *tid_agg_rx;
1610 u16 start_seq_num;
1611 u16 tid;
1612
1613 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1614 return RX_CONTINUE;
1615
1616 if (ieee80211_is_back_req(bar->frame_control)) {
1617 if (!rx->sta)
1618 return RX_CONTINUE;
1619 tid = le16_to_cpu(bar->control) >> 12;
1620 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1621 != HT_AGG_STATE_OPERATIONAL)
1622 return RX_CONTINUE;
1623 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1624
1625 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1626
1627 /* reset session timer */
1628 if (tid_agg_rx->timeout)
1629 mod_timer(&tid_agg_rx->session_timer,
1630 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1631
1632 /* manage reordering buffer according to the */
1633 /* requested sequence number */
1634 rcu_read_lock();
1635 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1636 start_seq_num, 1);
1637 rcu_read_unlock();
1638 return RX_DROP_UNUSABLE;
1639 }
1640
1641 return RX_CONTINUE;
1642 }
1643
1644 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1645 struct ieee80211_mgmt *mgmt,
1646 size_t len)
1647 {
1648 struct ieee80211_local *local = sdata->local;
1649 struct sk_buff *skb;
1650 struct ieee80211_mgmt *resp;
1651
1652 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1653 /* Not addressed to our own unicast address */
1654 return;
1655 }
1656
1657 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1658 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1659 /* Not from the current AP or not associated yet. */
1660 return;
1661 }
1662
1663 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1664 /* Too short SA Query request frame */
1665 return;
1666 }
1667
1668 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1669 if (skb == NULL)
1670 return;
1671
1672 skb_reserve(skb, local->hw.extra_tx_headroom);
1673 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1674 memset(resp, 0, 24);
1675 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1676 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1677 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1678 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1679 IEEE80211_STYPE_ACTION);
1680 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1681 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1682 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1683 memcpy(resp->u.action.u.sa_query.trans_id,
1684 mgmt->u.action.u.sa_query.trans_id,
1685 WLAN_SA_QUERY_TR_ID_LEN);
1686
1687 ieee80211_tx_skb(sdata, skb, 1);
1688 }
1689
1690 static ieee80211_rx_result debug_noinline
1691 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1692 {
1693 struct ieee80211_local *local = rx->local;
1694 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1695 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1696 int len = rx->skb->len;
1697
1698 if (!ieee80211_is_action(mgmt->frame_control))
1699 return RX_CONTINUE;
1700
1701 if (!rx->sta)
1702 return RX_DROP_MONITOR;
1703
1704 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1705 return RX_DROP_MONITOR;
1706
1707 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1708 return RX_DROP_MONITOR;
1709
1710 /* all categories we currently handle have action_code */
1711 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1712 return RX_DROP_MONITOR;
1713
1714 switch (mgmt->u.action.category) {
1715 case WLAN_CATEGORY_BACK:
1716 /*
1717 * The aggregation code is not prepared to handle
1718 * anything but STA/AP due to the BSSID handling;
1719 * IBSS could work in the code but isn't supported
1720 * by drivers or the standard.
1721 */
1722 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1723 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1724 sdata->vif.type != NL80211_IFTYPE_AP)
1725 return RX_DROP_MONITOR;
1726
1727 switch (mgmt->u.action.u.addba_req.action_code) {
1728 case WLAN_ACTION_ADDBA_REQ:
1729 if (len < (IEEE80211_MIN_ACTION_SIZE +
1730 sizeof(mgmt->u.action.u.addba_req)))
1731 return RX_DROP_MONITOR;
1732 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1733 break;
1734 case WLAN_ACTION_ADDBA_RESP:
1735 if (len < (IEEE80211_MIN_ACTION_SIZE +
1736 sizeof(mgmt->u.action.u.addba_resp)))
1737 return RX_DROP_MONITOR;
1738 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1739 break;
1740 case WLAN_ACTION_DELBA:
1741 if (len < (IEEE80211_MIN_ACTION_SIZE +
1742 sizeof(mgmt->u.action.u.delba)))
1743 return RX_DROP_MONITOR;
1744 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1745 break;
1746 }
1747 break;
1748 case WLAN_CATEGORY_SPECTRUM_MGMT:
1749 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1750 return RX_DROP_MONITOR;
1751
1752 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1753 return RX_DROP_MONITOR;
1754
1755 switch (mgmt->u.action.u.measurement.action_code) {
1756 case WLAN_ACTION_SPCT_MSR_REQ:
1757 if (len < (IEEE80211_MIN_ACTION_SIZE +
1758 sizeof(mgmt->u.action.u.measurement)))
1759 return RX_DROP_MONITOR;
1760 ieee80211_process_measurement_req(sdata, mgmt, len);
1761 break;
1762 case WLAN_ACTION_SPCT_CHL_SWITCH:
1763 if (len < (IEEE80211_MIN_ACTION_SIZE +
1764 sizeof(mgmt->u.action.u.chan_switch)))
1765 return RX_DROP_MONITOR;
1766
1767 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1768 return RX_DROP_MONITOR;
1769
1770 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1771 return RX_DROP_MONITOR;
1772
1773 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1774 }
1775 break;
1776 case WLAN_CATEGORY_SA_QUERY:
1777 if (len < (IEEE80211_MIN_ACTION_SIZE +
1778 sizeof(mgmt->u.action.u.sa_query)))
1779 return RX_DROP_MONITOR;
1780 switch (mgmt->u.action.u.sa_query.action) {
1781 case WLAN_ACTION_SA_QUERY_REQUEST:
1782 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1783 return RX_DROP_MONITOR;
1784 ieee80211_process_sa_query_req(sdata, mgmt, len);
1785 break;
1786 case WLAN_ACTION_SA_QUERY_RESPONSE:
1787 /*
1788 * SA Query response is currently only used in AP mode
1789 * and it is processed in user space.
1790 */
1791 return RX_CONTINUE;
1792 }
1793 break;
1794 default:
1795 return RX_CONTINUE;
1796 }
1797
1798 rx->sta->rx_packets++;
1799 dev_kfree_skb(rx->skb);
1800 return RX_QUEUED;
1801 }
1802
1803 static ieee80211_rx_result debug_noinline
1804 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1805 {
1806 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1807 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1808
1809 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1810 return RX_DROP_MONITOR;
1811
1812 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1813 return RX_DROP_MONITOR;
1814
1815 if (ieee80211_vif_is_mesh(&sdata->vif))
1816 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
1817
1818 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1819 return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
1820
1821 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1822 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1823
1824 return RX_DROP_MONITOR;
1825 }
1826
1827 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
1828 struct ieee80211_rx_data *rx)
1829 {
1830 int keyidx;
1831 unsigned int hdrlen;
1832
1833 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1834 if (rx->skb->len >= hdrlen + 4)
1835 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1836 else
1837 keyidx = -1;
1838
1839 if (!rx->sta) {
1840 /*
1841 * Some hardware seem to generate incorrect Michael MIC
1842 * reports; ignore them to avoid triggering countermeasures.
1843 */
1844 goto ignore;
1845 }
1846
1847 if (!ieee80211_has_protected(hdr->frame_control))
1848 goto ignore;
1849
1850 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1851 /*
1852 * APs with pairwise keys should never receive Michael MIC
1853 * errors for non-zero keyidx because these are reserved for
1854 * group keys and only the AP is sending real multicast
1855 * frames in the BSS.
1856 */
1857 goto ignore;
1858 }
1859
1860 if (!ieee80211_is_data(hdr->frame_control) &&
1861 !ieee80211_is_auth(hdr->frame_control))
1862 goto ignore;
1863
1864 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
1865 GFP_ATOMIC);
1866 ignore:
1867 dev_kfree_skb(rx->skb);
1868 rx->skb = NULL;
1869 }
1870
1871 /* TODO: use IEEE80211_RX_FRAGMENTED */
1872 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1873 {
1874 struct ieee80211_sub_if_data *sdata;
1875 struct ieee80211_local *local = rx->local;
1876 struct ieee80211_rtap_hdr {
1877 struct ieee80211_radiotap_header hdr;
1878 u8 flags;
1879 u8 rate;
1880 __le16 chan_freq;
1881 __le16 chan_flags;
1882 } __attribute__ ((packed)) *rthdr;
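/*
 * Together with the fixed 8-byte struct ieee80211_radiotap_header this
 * is a minimal 14-byte radiotap header; the flags, rate and channel
 * fields correspond to the it_present bits set below.
 */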
1883 struct sk_buff *skb = rx->skb, *skb2;
1884 struct net_device *prev_dev = NULL;
1885 struct ieee80211_rx_status *status = rx->status;
1886
1887 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1888 goto out_free_skb;
1889
1890 if (skb_headroom(skb) < sizeof(*rthdr) &&
1891 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1892 goto out_free_skb;
1893
1894 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1895 memset(rthdr, 0, sizeof(*rthdr));
1896 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1897 rthdr->hdr.it_present =
1898 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1899 (1 << IEEE80211_RADIOTAP_RATE) |
1900 (1 << IEEE80211_RADIOTAP_CHANNEL));
1901
1902 rthdr->rate = rx->rate->bitrate / 5;
1903 rthdr->chan_freq = cpu_to_le16(status->freq);
1904
1905 if (status->band == IEEE80211_BAND_5GHZ)
1906 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1907 IEEE80211_CHAN_5GHZ);
1908 else
1909 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1910 IEEE80211_CHAN_2GHZ);
1911
1912 skb_set_mac_header(skb, 0);
1913 skb->ip_summed = CHECKSUM_UNNECESSARY;
1914 skb->pkt_type = PACKET_OTHERHOST;
1915 skb->protocol = htons(ETH_P_802_2);
1916
1917 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1918 if (!netif_running(sdata->dev))
1919 continue;
1920
1921 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1922 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1923 continue;
1924
1925 if (prev_dev) {
1926 skb2 = skb_clone(skb, GFP_ATOMIC);
1927 if (skb2) {
1928 skb2->dev = prev_dev;
1929 netif_rx(skb2);
1930 }
1931 }
1932
1933 prev_dev = sdata->dev;
1934 sdata->dev->stats.rx_packets++;
1935 sdata->dev->stats.rx_bytes += skb->len;
1936 }
1937
1938 if (prev_dev) {
1939 skb->dev = prev_dev;
1940 netif_rx(skb);
1941 skb = NULL;
1942 } else
1943 goto out_free_skb;
1944
1945 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1946 return;
1947
1948 out_free_skb:
1949 dev_kfree_skb(skb);
1950 }
1951
1952
1953 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1954 struct ieee80211_rx_data *rx,
1955 struct sk_buff *skb)
1956 {
1957 ieee80211_rx_result res = RX_DROP_MONITOR;
1958
1959 rx->skb = skb;
1960 rx->sdata = sdata;
1961 rx->dev = sdata->dev;
1962
1963 #define CALL_RXH(rxh) \
1964 do { \
1965 res = rxh(rx); \
1966 if (res != RX_CONTINUE) \
1967 goto rxh_done; \
1968 } while (0);
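/*
 * Each handler returns RX_CONTINUE to pass the frame on to the next
 * handler; any other result (RX_QUEUED, RX_DROP_UNUSABLE or
 * RX_DROP_MONITOR) stops the chain and is accounted for at rxh_done
 * below.
 */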
1969
1970 CALL_RXH(ieee80211_rx_h_passive_scan)
1971 CALL_RXH(ieee80211_rx_h_check)
1972 CALL_RXH(ieee80211_rx_h_decrypt)
1973 CALL_RXH(ieee80211_rx_h_check_more_data)
1974 CALL_RXH(ieee80211_rx_h_sta_process)
1975 CALL_RXH(ieee80211_rx_h_defragment)
1976 CALL_RXH(ieee80211_rx_h_ps_poll)
1977 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1978 /* must be after MMIC verify so header is counted in MPDU mic */
1979 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1980 CALL_RXH(ieee80211_rx_h_amsdu)
1981 #ifdef CONFIG_MAC80211_MESH
1982 if (ieee80211_vif_is_mesh(&sdata->vif))
1983 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1984 #endif
1985 CALL_RXH(ieee80211_rx_h_data)
1986 CALL_RXH(ieee80211_rx_h_ctrl)
1987 CALL_RXH(ieee80211_rx_h_action)
1988 CALL_RXH(ieee80211_rx_h_mgmt)
1989
1990 #undef CALL_RXH
1991
1992 rxh_done:
1993 switch (res) {
1994 case RX_DROP_MONITOR:
1995 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1996 if (rx->sta)
1997 rx->sta->rx_dropped++;
1998 /* fall through */
1999 case RX_CONTINUE:
2000 ieee80211_rx_cooked_monitor(rx);
2001 break;
2002 case RX_DROP_UNUSABLE:
2003 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2004 if (rx->sta)
2005 rx->sta->rx_dropped++;
2006 dev_kfree_skb(rx->skb);
2007 break;
2008 case RX_QUEUED:
2009 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2010 break;
2011 }
2012 }
2013
2014 /* main receive path */
2015
2016 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2017 struct ieee80211_rx_data *rx,
2018 struct ieee80211_hdr *hdr)
2019 {
2020 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
2021 int multicast = is_multicast_ether_addr(hdr->addr1);
2022
2023 switch (sdata->vif.type) {
2024 case NL80211_IFTYPE_STATION:
2025 if (!bssid)
2026 return 0;
2027 if (!multicast &&
2028 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
2029 if (!(sdata->dev->flags & IFF_PROMISC))
2030 return 0;
2031 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2032 }
2033 break;
2034 case NL80211_IFTYPE_ADHOC:
2035 if (!bssid)
2036 return 0;
2037 if (ieee80211_is_beacon(hdr->frame_control)) {
2038 return 1;
2039 } else if (!ieee80211_bssid_match(bssid,
2040 sdata->u.ibss.bssid)) {
2041 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2042 return 0;
2043 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2044 } else if (!multicast &&
2045 compare_ether_addr(sdata->dev->dev_addr,
2046 hdr->addr1) != 0) {
2047 if (!(sdata->dev->flags & IFF_PROMISC))
2048 return 0;
2049 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2050 } else if (!rx->sta) {
2051 int rate_idx;
2052 if (rx->status->flag & RX_FLAG_HT)
2053 rate_idx = 0; /* TODO: HT rates */
2054 else
2055 rate_idx = rx->status->rate_idx;
2056 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2057 BIT(rate_idx));
2058 }
2059 break;
2060 case NL80211_IFTYPE_MESH_POINT:
2061 if (!multicast &&
2062 compare_ether_addr(sdata->dev->dev_addr,
2063 hdr->addr1) != 0) {
2064 if (!(sdata->dev->flags & IFF_PROMISC))
2065 return 0;
2066
2067 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2068 }
2069 break;
2070 case NL80211_IFTYPE_AP_VLAN:
2071 case NL80211_IFTYPE_AP:
2072 if (!bssid) {
2073 if (compare_ether_addr(sdata->dev->dev_addr,
2074 hdr->addr1))
2075 return 0;
2076 } else if (!ieee80211_bssid_match(bssid,
2077 sdata->dev->dev_addr)) {
2078 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2079 return 0;
2080 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2081 }
2082 break;
2083 case NL80211_IFTYPE_WDS:
2084 if (bssid || !ieee80211_is_data(hdr->frame_control))
2085 return 0;
2086 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2087 return 0;
2088 break;
2089 case NL80211_IFTYPE_MONITOR:
2090 /* take everything */
2091 break;
2092 case NL80211_IFTYPE_UNSPECIFIED:
2093 case __NL80211_IFTYPE_AFTER_LAST:
2094 /* should never get here */
2095 WARN_ON(1);
2096 break;
2097 }
2098
2099 return 1;
2100 }
2101
2102 /*
2103 * This is the actual Rx frames handler. As it belongs to the Rx path it
2104 * must be called with rcu_read_lock protection.
2105 */
2106 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2107 struct sk_buff *skb,
2108 struct ieee80211_rate *rate)
2109 {
2110 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2111 struct ieee80211_local *local = hw_to_local(hw);
2112 struct ieee80211_sub_if_data *sdata;
2113 struct ieee80211_hdr *hdr;
2114 struct ieee80211_rx_data rx;
2115 int prepares;
2116 struct ieee80211_sub_if_data *prev = NULL;
2117 struct sk_buff *skb_new;
2118
2119 hdr = (struct ieee80211_hdr *)skb->data;
2120 memset(&rx, 0, sizeof(rx));
2121 rx.skb = skb;
2122 rx.local = local;
2123
2124 rx.status = status;
2125 rx.rate = rate;
2126
2127 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2128 local->dot11ReceivedFragmentCount++;
2129
2130 rx.sta = sta_info_get(local, hdr->addr2);
2131 if (rx.sta) {
2132 rx.sdata = rx.sta->sdata;
2133 rx.dev = rx.sta->sdata->dev;
2134 }
2135
2136 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2137 ieee80211_rx_michael_mic_report(hdr, &rx);
2138 return;
2139 }
2140
2141 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2142 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2143 rx.flags |= IEEE80211_RX_IN_SCAN;
2144
2145 ieee80211_parse_qos(&rx);
2146 ieee80211_verify_alignment(&rx);
2147
2148 skb = rx.skb;
2149
2150 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2151 if (!netif_running(sdata->dev))
2152 continue;
2153
2154 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
2155 continue;
2156
2157 rx.flags |= IEEE80211_RX_RA_MATCH;
2158 prepares = prepare_for_handlers(sdata, &rx, hdr);
2159
2160 if (!prepares)
2161 continue;
2162
2163 /*
2164 * frame is destined for this interface; defer handling it so
2165 * that the last matching interface can be handled after the
2166 * loop with the original SKB, avoiding one copy too many
2167 */
2168
2169 if (!prev) {
2170 prev = sdata;
2171 continue;
2172 }
2173
2174 /*
2175 * frame was destined for the previous interface
2176 * so invoke RX handlers for it
2177 */
2178
2179 skb_new = skb_copy(skb, GFP_ATOMIC);
2180 if (!skb_new) {
2181 if (net_ratelimit())
2182 printk(KERN_DEBUG "%s: failed to copy "
2183 "multicast frame for %s\n",
2184 wiphy_name(local->hw.wiphy),
2185 prev->dev->name);
2186 continue;
2187 }
2188 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2189 prev = sdata;
2190 }
2191 if (prev)
2192 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2193 else
2194 dev_kfree_skb(skb);
2195 }
2196
2197 #define SEQ_MODULO 0x1000
2198 #define SEQ_MASK 0xfff
2199
2200 static inline int seq_less(u16 sq1, u16 sq2)
2201 {
2202 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2203 }
2204
2205 static inline u16 seq_inc(u16 sq)
2206 {
2207 return (sq + 1) & SEQ_MASK;
2208 }
2209
2210 static inline u16 seq_sub(u16 sq1, u16 sq2)
2211 {
2212 return (sq1 - sq2) & SEQ_MASK;
2213 }
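/*
 * These helpers work modulo the 12-bit 802.11 sequence number space,
 * so comparisons behave correctly across the 0xfff -> 0x000 wrap.
 * For example:
 *	seq_less(0xffe, 0x001) == 1	(0xffe precedes 0x001)
 *	seq_sub(0x001, 0xffe) == 3	(distance across the wrap)
 *	seq_inc(0xfff) == 0
 */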
2214
2215
2216 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2217 struct tid_ampdu_rx *tid_agg_rx,
2218 int index)
2219 {
2220 struct ieee80211_supported_band *sband;
2221 struct ieee80211_rate *rate;
2222 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
2223 struct ieee80211_rx_status *status;
2224
2225 if (!skb)
2226 goto no_frame;
2227
2228 status = IEEE80211_SKB_RXCB(skb);
2229
2230 /* release the reordered frames to stack */
2231 sband = hw->wiphy->bands[status->band];
2232 if (status->flag & RX_FLAG_HT)
2233 rate = sband->bitrates; /* TODO: HT rates */
2234 else
2235 rate = &sband->bitrates[status->rate_idx];
2236 __ieee80211_rx_handle_packet(hw, skb, rate);
2237 tid_agg_rx->stored_mpdu_num--;
2238 tid_agg_rx->reorder_buf[index] = NULL;
2239
2240 no_frame:
2241 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2242 }
2243
2244
2245 /*
2246 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
2247 * the skb was added to the buffer longer than this time ago, the earlier
2248 * frames that have not yet been received are assumed to be lost and the skb
2249 * can be released for processing. This may also release other skb's from the
2250 * reorder buffer if there are no additional gaps between the frames.
2251 */
2252 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
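/*
 * HZ / 10 corresponds to roughly 100 ms independent of CONFIG_HZ; the
 * timeout check in ieee80211_sta_manage_reorder_buf() below uses the
 * same HZ / 10 value directly.
 */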
2253
2254 /*
2255 * As this function belongs to the Rx path it must be called with
2256 * the proper rcu_read_lock protection for its flow.
2257 */
2258 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2259 struct tid_ampdu_rx *tid_agg_rx,
2260 struct sk_buff *skb,
2261 u16 mpdu_seq_num,
2262 int bar_req)
2263 {
2264 u16 head_seq_num, buf_size;
2265 int index;
2266
2267 buf_size = tid_agg_rx->buf_size;
2268 head_seq_num = tid_agg_rx->head_seq_num;
2269
2270 /* frame with out of date sequence number */
2271 if (seq_less(mpdu_seq_num, head_seq_num)) {
2272 dev_kfree_skb(skb);
2273 return 1;
2274 }
2275
2276 /* if the frame sequence number exceeds our buffering window size or
2277 * a Block Ack Request arrived - release stored frames */
2278 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2279 /* new head to the ordering buffer */
2280 if (bar_req)
2281 head_seq_num = mpdu_seq_num;
2282 else
2283 head_seq_num =
2284 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2285 /* release stored frames up to new head to stack */
2286 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2287 index = seq_sub(tid_agg_rx->head_seq_num,
2288 tid_agg_rx->ssn)
2289 % tid_agg_rx->buf_size;
2290 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2291 index);
2292 }
2293 if (bar_req)
2294 return 1;
2295 }
2296
2297 /* now the new frame is always in the range of the
2298 * reordering buffer window */
2299 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2300 % tid_agg_rx->buf_size;
2301 /* check if we already stored this frame */
2302 if (tid_agg_rx->reorder_buf[index]) {
2303 dev_kfree_skb(skb);
2304 return 1;
2305 }
2306
2307 /* if the arriving MPDU is in order and nothing else is stored,
2308 * release it immediately */
2309 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2310 tid_agg_rx->stored_mpdu_num == 0) {
2311 tid_agg_rx->head_seq_num =
2312 seq_inc(tid_agg_rx->head_seq_num);
2313 return 0;
2314 }
2315
2316 /* put the frame in the reordering buffer */
2317 tid_agg_rx->reorder_buf[index] = skb;
2318 tid_agg_rx->reorder_time[index] = jiffies;
2319 tid_agg_rx->stored_mpdu_num++;
2320 /* release the buffer until next missing frame */
2321 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2322 % tid_agg_rx->buf_size;
2323 if (!tid_agg_rx->reorder_buf[index] &&
2324 tid_agg_rx->stored_mpdu_num > 1) {
2325 /*
2326 * No buffers ready to be released, but check whether any
2327 * frames in the reorder buffer have timed out.
2328 */
2329 int j;
2330 int skipped = 1;
2331 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2332 j = (j + 1) % tid_agg_rx->buf_size) {
2333 if (tid_agg_rx->reorder_buf[j] == NULL) {
2334 skipped++;
2335 continue;
2336 }
2337 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2338 HZ / 10))
2339 break;
2340
2341 #ifdef CONFIG_MAC80211_HT_DEBUG
2342 if (net_ratelimit())
2343 printk(KERN_DEBUG "%s: release an RX reorder "
2344 "frame due to timeout on earlier "
2345 "frames\n",
2346 wiphy_name(hw->wiphy));
2347 #endif
2348 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2349
2350 /*
2351 * Increment the head seq# also for the skipped slots.
2352 */
2353 tid_agg_rx->head_seq_num =
2354 (tid_agg_rx->head_seq_num + skipped) &
2355 SEQ_MASK;
2356 skipped = 0;
2357 }
2358 } else while (tid_agg_rx->reorder_buf[index]) {
2359 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2360 index = seq_sub(tid_agg_rx->head_seq_num,
2361 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2362 }
2363 return 1;
2364 }
2365
2366 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2367 struct sk_buff *skb)
2368 {
2369 struct ieee80211_hw *hw = &local->hw;
2370 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2371 struct sta_info *sta;
2372 struct tid_ampdu_rx *tid_agg_rx;
2373 u16 sc;
2374 u16 mpdu_seq_num;
2375 u8 ret = 0;
2376 int tid;
2377
2378 sta = sta_info_get(local, hdr->addr2);
2379 if (!sta)
2380 return ret;
2381
2382 /* filter the QoS data rx stream according to
2383 * STA/TID and check if this STA/TID has an active RX aggregation session */
2384 if (!ieee80211_is_data_qos(hdr->frame_control))
2385 goto end_reorder;
2386
2387 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2388
2389 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2390 goto end_reorder;
2391
2392 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2393
2394 /* qos null data frames are excluded */
2395 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2396 goto end_reorder;
2397
2398 /* new un-ordered ampdu frame - process it */
2399
2400 /* reset session timer */
2401 if (tid_agg_rx->timeout)
2402 mod_timer(&tid_agg_rx->session_timer,
2403 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2404
2405 /* if this mpdu is fragmented - terminate rx aggregation session */
2406 sc = le16_to_cpu(hdr->seq_ctrl);
2407 if (sc & IEEE80211_SCTL_FRAG) {
2408 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2409 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2410 ret = 1;
2411 goto end_reorder;
2412 }
2413
2414 /* deal with the reordering buffer according to the MPDU sequence number */
2415 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2416 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2417 mpdu_seq_num, 0);
2418 end_reorder:
2419 return ret;
2420 }
2421
2422 /*
2423 * This is the receive path handler. It is called by a low level driver when an
2424 * 802.11 MPDU is received from the hardware.
2425 */
2426 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2427 {
2428 struct ieee80211_local *local = hw_to_local(hw);
2429 struct ieee80211_rate *rate = NULL;
2430 struct ieee80211_supported_band *sband;
2431 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2432
2433 if (status->band < 0 ||
2434 status->band >= IEEE80211_NUM_BANDS) {
2435 WARN_ON(1);
2436 return;
2437 }
2438
2439 sband = local->hw.wiphy->bands[status->band];
2440 if (!sband) {
2441 WARN_ON(1);
2442 return;
2443 }
2444
2445 if (status->flag & RX_FLAG_HT) {
2446 /* rate_idx is MCS index */
2447 if (WARN_ON(status->rate_idx < 0 ||
2448 status->rate_idx >= 76))
2449 return;
2450 /* HT rates are not in the table - use the highest legacy rate
2451 * for now since other parts of mac80211 may not yet be fully
2452 * MCS aware. */
2453 rate = &sband->bitrates[sband->n_bitrates - 1];
2454 } else {
2455 if (WARN_ON(status->rate_idx < 0 ||
2456 status->rate_idx >= sband->n_bitrates))
2457 return;
2458 rate = &sband->bitrates[status->rate_idx];
2459 }
2460
2461 /*
2462 * key references and virtual interfaces are protected using RCU
2463 * and this requires that we are in a read-side RCU section during
2464 * receive processing
2465 */
2466 rcu_read_lock();
2467
2468 /*
2469 * Frames with failed FCS/PLCP checksum are not returned,
2470 * all other frames are returned without radiotap header
2471 * if it was previously present.
2472 * Also, frames with less than 16 bytes are dropped.
2473 */
2474 skb = ieee80211_rx_monitor(local, skb, rate);
2475 if (!skb) {
2476 rcu_read_unlock();
2477 return;
2478 }
2479
2480 /*
2481 * In theory, the block ack reordering should happen after duplicate
2482 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2483 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2484 * happen as a new RX handler between ieee80211_rx_h_check and
2485 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2486 * the time being, the call can be here since RX reorder buf processing
2487 * will implicitly skip duplicates. We could, in theory at least,
2488 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2489 * frames from other than operational channel), but that should not
2490 * happen in normal networks.
2491 */
2492 if (!ieee80211_rx_reorder_ampdu(local, skb))
2493 __ieee80211_rx_handle_packet(hw, skb, rate);
2494
2495 rcu_read_unlock();
2496 }
2497 EXPORT_SYMBOL(__ieee80211_rx);
2498
2499 /* This is a version of the rx handler that can be called from hard irq
2500 * context. Post the skb on the queue and schedule the tasklet */
2501 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2502 {
2503 struct ieee80211_local *local = hw_to_local(hw);
2504
2505 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2506
2507 skb->pkt_type = IEEE80211_RX_MSG;
2508 skb_queue_tail(&local->skb_queue, skb);
2509 tasklet_schedule(&local->tasklet);
2510 }
2511 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
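/*
 * Illustrative sketch of the driver side (an assumption about typical
 * usage, not code from this file): a low level driver fills in the RX
 * status in the skb control buffer and then hands the frame over, for
 * example from its interrupt handler:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = IEEE80211_BAND_2GHZ;
 *	status->freq = 2412;
 *	status->rate_idx = 0;
 *	ieee80211_rx_irqsafe(hw, skb);
 *
 * The field names match those consumed earlier in this file; a real
 * driver also reports signal strength and whatever RX_FLAG_* bits its
 * hardware can provide.
 */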