replace net_device arguments with ieee80211_{local,sub_if_data} as appropriate
net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "led.h"
23 #include "mesh.h"
24 #include "wep.h"
25 #include "wpa.h"
26 #include "tkip.h"
27 #include "wme.h"
28
29 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb, u16 mpdu_seq_num,
32 int bar_req);
33 /*
34 * monitor mode reception
35 *
36 * This function cleans up the SKB, i.e. it removes all the stuff
37 * only useful for monitoring.
38 */
39 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
40 struct sk_buff *skb,
41 int rtap_len)
42 {
43 skb_pull(skb, rtap_len);
44
45 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
46 if (likely(skb->len > FCS_LEN))
47 skb_trim(skb, skb->len - FCS_LEN);
48 else {
49 /* driver bug */
50 WARN_ON(1);
51 dev_kfree_skb(skb);
52 skb = NULL;
53 }
54 }
55
56 return skb;
57 }
58
59 static inline int should_drop_frame(struct ieee80211_rx_status *status,
60 struct sk_buff *skb,
61 int present_fcs_len,
62 int radiotap_len)
63 {
64 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
65
66 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
67 return 1;
68 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
69 return 1;
70 if (ieee80211_is_ctl(hdr->frame_control) &&
71 !ieee80211_is_pspoll(hdr->frame_control) &&
72 !ieee80211_is_back_req(hdr->frame_control))
73 return 1;
74 return 0;
75 }
76
77 static int
78 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
79 struct ieee80211_rx_status *status)
80 {
81 int len;
82
83 /* always present fields */
84 len = sizeof(struct ieee80211_radiotap_header) + 9;
85
86 if (status->flag & RX_FLAG_TSFT)
87 len += 8;
88 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
89 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
90 len += 1;
91 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
92 len += 1;
93
94 if (len & 1) /* padding for RX_FLAGS if necessary */
95 len++;
96
97 /* make sure radiotap starts at a naturally aligned address */
98 if (len % 8)
99 len = roundup(len, 8);
100
101 return len;
102 }
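/*
 * Worked example of the length computation above (illustrative only,
 * assuming the fixed radiotap header itself is 8 bytes): with
 * RX_FLAG_TSFT set and both IEEE80211_HW_SIGNAL_DBM and
 * IEEE80211_HW_NOISE_DBM advertised, len = 8 + 9 + 8 + 1 + 1 = 27,
 * which is odd, so one padding byte is added before RX_FLAGS (28) and
 * the final roundup to a multiple of eight reserves 32 bytes of
 * headroom for ieee80211_add_rx_radiotap_header() to fill in.
 */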
103
104 /**
105 * ieee80211_add_rx_radiotap_header - add radiotap header
106 *
107 * add a radiotap header containing all the fields which the hardware provided.
108 */
109 static void
110 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
111 struct sk_buff *skb,
112 struct ieee80211_rx_status *status,
113 struct ieee80211_rate *rate,
114 int rtap_len)
115 {
116 struct ieee80211_radiotap_header *rthdr;
117 unsigned char *pos;
118
119 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
120 memset(rthdr, 0, rtap_len);
121
122 /* radiotap header, set always present flags */
123 rthdr->it_present =
124 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
125 (1 << IEEE80211_RADIOTAP_RATE) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
148 pos++;
149
150 /* IEEE80211_RADIOTAP_RATE */
151 *pos = rate->bitrate / 5;
152 pos++;
153
154 /* IEEE80211_RADIOTAP_CHANNEL */
155 *(__le16 *)pos = cpu_to_le16(status->freq);
156 pos += 2;
157 if (status->band == IEEE80211_BAND_5GHZ)
158 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
159 IEEE80211_CHAN_5GHZ);
160 else if (rate->flags & IEEE80211_RATE_ERP_G)
161 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
162 IEEE80211_CHAN_2GHZ);
163 else
164 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
165 IEEE80211_CHAN_2GHZ);
166 pos += 2;
167
168 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
169 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
170 *pos = status->signal;
171 rthdr->it_present |=
172 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
173 pos++;
174 }
175
176 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
177 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
178 *pos = status->noise;
179 rthdr->it_present |=
180 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
181 pos++;
182 }
183
184 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
185
186 /* IEEE80211_RADIOTAP_ANTENNA */
187 *pos = status->antenna;
188 pos++;
189
190 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
191 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
192 *pos = status->signal;
193 rthdr->it_present |=
194 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
195 pos++;
196 }
197
198 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
199
200 /* IEEE80211_RADIOTAP_RX_FLAGS */
201 /* ensure 2 byte alignment for the 2 byte field as required */
202 if ((pos - (unsigned char *)rthdr) & 1)
203 pos++;
204 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
205 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
206 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
207 pos += 2;
208 }
209
210 /*
211 * This function copies a received frame to all monitor interfaces and
212 * returns a cleaned-up SKB that no longer includes the FCS nor the
213 * radiotap header the driver might have added.
214 */
215 static struct sk_buff *
216 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
217 struct ieee80211_rx_status *status,
218 struct ieee80211_rate *rate)
219 {
220 struct ieee80211_sub_if_data *sdata;
221 int needed_headroom = 0;
222 struct sk_buff *skb, *skb2;
223 struct net_device *prev_dev = NULL;
224 int present_fcs_len = 0;
225 int rtap_len = 0;
226
227 /*
228 * First, we may need to make a copy of the skb because
229 * (1) we need to modify it for radiotap (if not present), and
230 * (2) the other RX handlers will modify the skb we got.
231 *
232 * We don't need to, of course, if we aren't going to return
233 * the SKB because it has a bad FCS/PLCP checksum.
234 */
235 if (status->flag & RX_FLAG_RADIOTAP)
236 rtap_len = ieee80211_get_radiotap_len(origskb->data);
237 else
238 /* room for the radiotap header based on driver features */
239 needed_headroom = ieee80211_rx_radiotap_len(local, status);
240
241 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
242 present_fcs_len = FCS_LEN;
243
244 if (!local->monitors) {
245 if (should_drop_frame(status, origskb, present_fcs_len,
246 rtap_len)) {
247 dev_kfree_skb(origskb);
248 return NULL;
249 }
250
251 return remove_monitor_info(local, origskb, rtap_len);
252 }
253
254 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
255 /* only need to expand headroom if necessary */
256 skb = origskb;
257 origskb = NULL;
258
259 /*
260 * This shouldn't trigger often because most devices have an
261 * RX header they pull before we get here, and that should
262 * be big enough for our radiotap information. We should
263 * probably export the length to drivers so that we can have
264 * them allocate enough headroom to start with.
265 */
266 if (skb_headroom(skb) < needed_headroom &&
267 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
268 dev_kfree_skb(skb);
269 return NULL;
270 }
271 } else {
272 /*
273 * Need to make a copy and possibly remove radiotap header
274 * and FCS from the original.
275 */
276 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
277
278 origskb = remove_monitor_info(local, origskb, rtap_len);
279
280 if (!skb)
281 return origskb;
282 }
283
284 /* if necessary, prepend radiotap information */
285 if (!(status->flag & RX_FLAG_RADIOTAP))
286 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
287 needed_headroom);
288
289 skb_reset_mac_header(skb);
290 skb->ip_summed = CHECKSUM_UNNECESSARY;
291 skb->pkt_type = PACKET_OTHERHOST;
292 skb->protocol = htons(ETH_P_802_2);
293
294 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
295 if (!netif_running(sdata->dev))
296 continue;
297
298 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
299 continue;
300
301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
302 continue;
303
304 if (prev_dev) {
305 skb2 = skb_clone(skb, GFP_ATOMIC);
306 if (skb2) {
307 skb2->dev = prev_dev;
308 netif_rx(skb2);
309 }
310 }
311
312 prev_dev = sdata->dev;
313 sdata->dev->stats.rx_packets++;
314 sdata->dev->stats.rx_bytes += skb->len;
315 }
316
317 if (prev_dev) {
318 skb->dev = prev_dev;
319 netif_rx(skb);
320 } else
321 dev_kfree_skb(skb);
322
323 return origskb;
324 }
325
326
327 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
328 {
329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
330 int tid;
331
332 /* does the frame have a qos control field? */
333 if (ieee80211_is_data_qos(hdr->frame_control)) {
334 u8 *qc = ieee80211_get_qos_ctl(hdr);
335 /* frame has qos control */
336 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
337 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
338 rx->flags |= IEEE80211_RX_AMSDU;
339 else
340 rx->flags &= ~IEEE80211_RX_AMSDU;
341 } else {
342 /*
343 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
344 *
345 * Sequence numbers for management frames, QoS data
346 * frames with a broadcast/multicast address in the
347 * Address 1 field, and all non-QoS data frames sent
348 * by QoS STAs are assigned using an additional single
349 * modulo-4096 counter, [...]
350 *
351 * We also use that counter for non-QoS STAs.
352 */
353 tid = NUM_RX_DATA_QUEUES - 1;
354 }
355
356 rx->queue = tid;
357 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
358 * For now, set skb->priority to 0 for other cases. */
359 rx->skb->priority = (tid > 7) ? 0 : tid;
360 }
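/*
 * Illustrative mapping for the function above: a QoS data frame whose
 * QoS control field carries TID 5 ends up with rx->queue = 5 and
 * skb->priority = 5, while a non-QoS data or management frame falls
 * into the extra modulo-4096 counter queue, i.e. rx->queue =
 * NUM_RX_DATA_QUEUES - 1 (16 if NUM_RX_DATA_QUEUES is 17), and gets
 * skb->priority = 0 because that TID value is above 7.
 */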
361
362 static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
363 {
364 #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
366 int hdrlen;
367
368 if (!ieee80211_is_data_present(hdr->frame_control))
369 return;
370
371 /*
372 * Drivers are required to align the payload data in a way that
373 * guarantees that the contained IP header is aligned to a four-
374 * byte boundary. In the case of regular frames, this simply means
375 * aligning the payload to a four-byte boundary (because either
376 * the IP header is directly contained, or IV/RFC1042 headers that
377 * have a length divisible by four are in front of it).
378 *
379 * With A-MSDU frames, however, the payload data address must
380 * be two modulo four because there are 14-byte 802.3 headers
381 * within the A-MSDU frames that push the IP header further back
382 * to a multiple of four again. Thankfully, the specs were sane
383 * enough this time around to require padding each A-MSDU subframe
384 * to a length that is a multiple of four.
385 *
386 * Padding like Atheros hardware adds, which is in between the 802.11
387 * header and the payload, is not supported; the driver is required
388 * to move the 802.11 header further back in that case.
389 */
390 hdrlen = ieee80211_hdrlen(hdr->frame_control);
391 if (rx->flags & IEEE80211_RX_AMSDU)
392 hdrlen += ETH_HLEN;
393 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
394 #endif
395 }
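/*
 * Example of the alignment rule checked above (illustrative): for a
 * regular frame with a 24-byte 802.11 header followed by an 8-byte IV
 * and an 8-byte RFC1042 header, the IP header starts 40 bytes past an
 * aligned skb->data, i.e. on a four-byte boundary. For an A-MSDU the
 * payload must instead start at an address that is 2 modulo 4, so the
 * 14-byte 802.3 subframe header pushes the IP header to 2 + 14 = 16,
 * a multiple of four again.
 */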
396
397
398 /* rx handlers */
399
400 static ieee80211_rx_result debug_noinline
401 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
402 {
403 struct ieee80211_local *local = rx->local;
404 struct sk_buff *skb = rx->skb;
405
406 if (unlikely(local->sta_hw_scanning))
407 return ieee80211_sta_rx_scan(rx->sdata, skb, rx->status);
408
409 if (unlikely(local->sta_sw_scanning)) {
410 /* drop all the other packets during a software scan anyway */
411 if (ieee80211_sta_rx_scan(rx->sdata, skb, rx->status)
412 != RX_QUEUED)
413 dev_kfree_skb(skb);
414 return RX_QUEUED;
415 }
416
417 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
418 /* scanning finished during invoking of handlers */
419 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
420 return RX_DROP_UNUSABLE;
421 }
422
423 return RX_CONTINUE;
424 }
425
426 static ieee80211_rx_result
427 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
428 {
429 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
430 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
431
432 if (ieee80211_is_data(hdr->frame_control)) {
433 if (!ieee80211_has_a4(hdr->frame_control))
434 return RX_DROP_MONITOR;
435 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
436 return RX_DROP_MONITOR;
437 }
438
439 /* If there is no established peer link and this is not a peer link
440 * establishment frame, beacon or probe, drop the frame.
441 */
442
443 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
444 struct ieee80211_mgmt *mgmt;
445
446 if (!ieee80211_is_mgmt(hdr->frame_control))
447 return RX_DROP_MONITOR;
448
449 if (ieee80211_is_action(hdr->frame_control)) {
450 mgmt = (struct ieee80211_mgmt *)hdr;
451 if (mgmt->u.action.category != PLINK_CATEGORY)
452 return RX_DROP_MONITOR;
453 return RX_CONTINUE;
454 }
455
456 if (ieee80211_is_probe_req(hdr->frame_control) ||
457 ieee80211_is_probe_resp(hdr->frame_control) ||
458 ieee80211_is_beacon(hdr->frame_control))
459 return RX_CONTINUE;
460
461 return RX_DROP_MONITOR;
462
463 }
464
465 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
466
467 if (ieee80211_is_data(hdr->frame_control) &&
468 is_multicast_ether_addr(hdr->addr1) &&
469 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
470 return RX_DROP_MONITOR;
471 #undef msh_h_get
472
473 return RX_CONTINUE;
474 }
475
476
477 static ieee80211_rx_result debug_noinline
478 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
479 {
480 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
481
482 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
483 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
484 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
485 rx->sta->last_seq_ctrl[rx->queue] ==
486 hdr->seq_ctrl)) {
487 if (rx->flags & IEEE80211_RX_RA_MATCH) {
488 rx->local->dot11FrameDuplicateCount++;
489 rx->sta->num_duplicates++;
490 }
491 return RX_DROP_MONITOR;
492 } else
493 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
494 }
495
496 if (unlikely(rx->skb->len < 16)) {
497 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
498 return RX_DROP_MONITOR;
499 }
500
501 /* Drop disallowed frame classes based on STA auth/assoc state;
502 * IEEE 802.11, Chap 5.5.
503 *
504 * 80211.o does filtering only based on association state, i.e., it
505 * drops Class 3 frames from not associated stations. hostapd sends
506 * deauth/disassoc frames when needed. In addition, hostapd is
507 * responsible for filtering on both auth and assoc states.
508 */
509
510 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
511 return ieee80211_rx_mesh_check(rx);
512
513 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
514 ieee80211_is_pspoll(hdr->frame_control)) &&
515 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
516 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
517 if ((!ieee80211_has_fromds(hdr->frame_control) &&
518 !ieee80211_has_tods(hdr->frame_control) &&
519 ieee80211_is_data(hdr->frame_control)) ||
520 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
521 /* Drop IBSS frames and frames for other hosts
522 * silently. */
523 return RX_DROP_MONITOR;
524 }
525
526 return RX_DROP_MONITOR;
527 }
528
529 return RX_CONTINUE;
530 }
531
532
533 static ieee80211_rx_result debug_noinline
534 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
535 {
536 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
537 int keyidx;
538 int hdrlen;
539 ieee80211_rx_result result = RX_DROP_UNUSABLE;
540 struct ieee80211_key *stakey = NULL;
541
542 /*
543 * Key selection 101
544 *
545 * There are three types of keys:
546 * - GTK (group keys)
547 * - PTK (pairwise keys)
548 * - STK (station-to-station pairwise keys)
549 *
550 * When selecting a key, we have to distinguish between multicast
551 * (including broadcast) and unicast frames, the latter can only
552 * use PTKs and STKs while the former always use GTKs. Unless, of
553 * course, actual WEP keys ("pre-RSNA") are used, then unicast
554 * frames can also use key indices like GTKs. Hence, if we don't
555 * have a PTK/STK we check the key index for a WEP key.
556 *
557 * Note that in a regular BSS, multicast frames are sent by the
558 * AP only, associated stations unicast the frame to the AP first
559 * which then multicasts it on their behalf.
560 *
561 * There is also a slight problem in IBSS mode: GTKs are negotiated
562 * with each station, that is something we don't currently handle.
563 * The spec seems to expect that one negotiates the same key with
564 * every station but there's no such requirement; VLANs could be
565 * possible.
566 */
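/*
 * Concrete example of the selection below (illustrative): a unicast,
 * CCMP-protected frame from a station we hold a pairwise key for is
 * decrypted with that key (stakey); a protected broadcast frame, or a
 * WEP frame without a pairwise key, carries a key index in bits 6-7
 * of the fourth IV octet instead, so e.g. index 2 selects the group
 * key stored in sdata->keys[2].
 */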
567
568 if (!ieee80211_has_protected(hdr->frame_control))
569 return RX_CONTINUE;
570
571 /*
572 * No point in finding a key and decrypting if the frame is neither
573 * addressed to us nor a multicast frame.
574 */
575 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
576 return RX_CONTINUE;
577
578 if (rx->sta)
579 stakey = rcu_dereference(rx->sta->key);
580
581 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
582 rx->key = stakey;
583 } else {
584 /*
585 * The device doesn't give us the IV so we won't be
586 * able to look up the key. That's ok though, we
587 * don't need to decrypt the frame, we just won't
588 * be able to keep statistics accurate.
589 * Except for key threshold notifications, should
590 * we somehow allow the driver to tell us which key
591 * the hardware used if this flag is set?
592 */
593 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
594 (rx->status->flag & RX_FLAG_IV_STRIPPED))
595 return RX_CONTINUE;
596
597 hdrlen = ieee80211_hdrlen(hdr->frame_control);
598
599 if (rx->skb->len < 8 + hdrlen)
600 return RX_DROP_UNUSABLE; /* TODO: count this? */
601
602 /*
603 * no need to call ieee80211_wep_get_keyidx,
604 * it verifies a bunch of things we've done already
605 */
606 keyidx = rx->skb->data[hdrlen + 3] >> 6;
607
608 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
609
610 /*
611 * RSNA-protected unicast frames should always be sent with
612 * pairwise or station-to-station keys, but for WEP we allow
613 * using a key index as well.
614 */
615 if (rx->key && rx->key->conf.alg != ALG_WEP &&
616 !is_multicast_ether_addr(hdr->addr1))
617 rx->key = NULL;
618 }
619
620 if (rx->key) {
621 rx->key->tx_rx_count++;
622 /* TODO: add threshold stuff again */
623 } else {
624 return RX_DROP_MONITOR;
625 }
626
627 /* Check for weak IVs if possible */
628 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
629 ieee80211_is_data(hdr->frame_control) &&
630 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
631 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
632 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
633 rx->sta->wep_weak_iv_count++;
634
635 switch (rx->key->conf.alg) {
636 case ALG_WEP:
637 result = ieee80211_crypto_wep_decrypt(rx);
638 break;
639 case ALG_TKIP:
640 result = ieee80211_crypto_tkip_decrypt(rx);
641 break;
642 case ALG_CCMP:
643 result = ieee80211_crypto_ccmp_decrypt(rx);
644 break;
645 }
646
647 /* either the frame has been decrypted or will be dropped */
648 rx->status->flag |= RX_FLAG_DECRYPTED;
649
650 return result;
651 }
652
653 static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
654 {
655 struct ieee80211_sub_if_data *sdata;
656 DECLARE_MAC_BUF(mac);
657
658 sdata = sta->sdata;
659
660 atomic_inc(&sdata->bss->num_sta_ps);
661 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
662 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
663 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
664 dev->name, print_mac(mac, sta->addr), sta->aid);
665 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
666 }
667
668 static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
669 {
670 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
671 struct sk_buff *skb;
672 int sent = 0;
673 struct ieee80211_sub_if_data *sdata;
674 struct ieee80211_tx_info *info;
675 DECLARE_MAC_BUF(mac);
676
677 sdata = sta->sdata;
678
679 atomic_dec(&sdata->bss->num_sta_ps);
680
681 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
682
683 if (!skb_queue_empty(&sta->ps_tx_buf))
684 sta_info_clear_tim_bit(sta);
685
686 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
687 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
688 dev->name, print_mac(mac, sta->addr), sta->aid);
689 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
690
691 /* Send all buffered frames to the station */
692 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
693 info = IEEE80211_SKB_CB(skb);
694 sent++;
695 info->flags |= IEEE80211_TX_CTL_REQUEUE;
696 dev_queue_xmit(skb);
697 }
698 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
699 info = IEEE80211_SKB_CB(skb);
700 local->total_ps_buffered--;
701 sent++;
702 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
703 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
704 "since STA not sleeping anymore\n", dev->name,
705 print_mac(mac, sta->addr), sta->aid);
706 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
707 info->flags |= IEEE80211_TX_CTL_REQUEUE;
708 dev_queue_xmit(skb);
709 }
710
711 return sent;
712 }
713
714 static ieee80211_rx_result debug_noinline
715 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
716 {
717 struct sta_info *sta = rx->sta;
718 struct net_device *dev = rx->dev;
719 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
720
721 if (!sta)
722 return RX_CONTINUE;
723
724 /* Update last_rx only for IBSS packets which are for the current
725 * BSSID to avoid keeping the current IBSS network alive in cases where
726 * other STAs are using a different BSSID. */
727 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
728 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
729 IEEE80211_IF_TYPE_IBSS);
730 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
731 sta->last_rx = jiffies;
732 } else
733 if (!is_multicast_ether_addr(hdr->addr1) ||
734 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) {
735 /* Update last_rx only for unicast frames in order to prevent
736 * the Probe Request frames (the only broadcast frames from a
737 * STA in infrastructure mode) from keeping a connection alive.
738 * Mesh beacons will update last_rx if they are found to
739 * match the current local configuration when processed.
740 */
741 sta->last_rx = jiffies;
742 }
743
744 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
745 return RX_CONTINUE;
746
747 sta->rx_fragments++;
748 sta->rx_bytes += rx->skb->len;
749 sta->last_signal = rx->status->signal;
750 sta->last_qual = rx->status->qual;
751 sta->last_noise = rx->status->noise;
752
753 if (!ieee80211_has_morefrags(hdr->frame_control) &&
754 (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP ||
755 rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) {
756 /* Change STA power saving mode only at the end of a frame
757 * exchange sequence */
758 if (test_sta_flags(sta, WLAN_STA_PS) &&
759 !ieee80211_has_pm(hdr->frame_control))
760 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
761 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
762 ieee80211_has_pm(hdr->frame_control))
763 ap_sta_ps_start(dev, sta);
764 }
765
766 /* Drop data::nullfunc frames silently, since they are used only to
767 * control station power saving mode. */
768 if (ieee80211_is_nullfunc(hdr->frame_control)) {
769 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
770 /* Update counter and free packet here to avoid counting this
771 * as a dropped packet. */
772 sta->rx_packets++;
773 dev_kfree_skb(rx->skb);
774 return RX_QUEUED;
775 }
776
777 return RX_CONTINUE;
778 } /* ieee80211_rx_h_sta_process */
779
780 static inline struct ieee80211_fragment_entry *
781 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
782 unsigned int frag, unsigned int seq, int rx_queue,
783 struct sk_buff **skb)
784 {
785 struct ieee80211_fragment_entry *entry;
786 int idx;
787
788 idx = sdata->fragment_next;
789 entry = &sdata->fragments[sdata->fragment_next++];
790 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
791 sdata->fragment_next = 0;
792
793 if (!skb_queue_empty(&entry->skb_list)) {
794 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
795 struct ieee80211_hdr *hdr =
796 (struct ieee80211_hdr *) entry->skb_list.next->data;
797 DECLARE_MAC_BUF(mac);
798 DECLARE_MAC_BUF(mac2);
799 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
800 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
801 "addr1=%s addr2=%s\n",
802 sdata->dev->name, idx,
803 jiffies - entry->first_frag_time, entry->seq,
804 entry->last_frag, print_mac(mac, hdr->addr1),
805 print_mac(mac2, hdr->addr2));
806 #endif
807 __skb_queue_purge(&entry->skb_list);
808 }
809
810 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
811 *skb = NULL;
812 entry->first_frag_time = jiffies;
813 entry->seq = seq;
814 entry->rx_queue = rx_queue;
815 entry->last_frag = frag;
816 entry->ccmp = 0;
817 entry->extra_len = 0;
818
819 return entry;
820 }
821
822 static inline struct ieee80211_fragment_entry *
823 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
824 unsigned int frag, unsigned int seq,
825 int rx_queue, struct ieee80211_hdr *hdr)
826 {
827 struct ieee80211_fragment_entry *entry;
828 int i, idx;
829
830 idx = sdata->fragment_next;
831 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
832 struct ieee80211_hdr *f_hdr;
833
834 idx--;
835 if (idx < 0)
836 idx = IEEE80211_FRAGMENT_MAX - 1;
837
838 entry = &sdata->fragments[idx];
839 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
840 entry->rx_queue != rx_queue ||
841 entry->last_frag + 1 != frag)
842 continue;
843
844 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
845
846 /*
847 * Check ftype and addresses are equal, else check next fragment
848 */
849 if (((hdr->frame_control ^ f_hdr->frame_control) &
850 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
851 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
852 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
853 continue;
854
855 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
856 __skb_queue_purge(&entry->skb_list);
857 continue;
858 }
859 return entry;
860 }
861
862 return NULL;
863 }
864
865 static ieee80211_rx_result debug_noinline
866 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
867 {
868 struct ieee80211_hdr *hdr;
869 u16 sc;
870 __le16 fc;
871 unsigned int frag, seq;
872 struct ieee80211_fragment_entry *entry;
873 struct sk_buff *skb;
874 DECLARE_MAC_BUF(mac);
875
876 hdr = (struct ieee80211_hdr *)rx->skb->data;
877 fc = hdr->frame_control;
878 sc = le16_to_cpu(hdr->seq_ctrl);
879 frag = sc & IEEE80211_SCTL_FRAG;
880
881 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
882 (rx->skb)->len < 24 ||
883 is_multicast_ether_addr(hdr->addr1))) {
884 /* not fragmented */
885 goto out;
886 }
887 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
888
889 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
890
891 if (frag == 0) {
892 /* This is the first fragment of a new frame. */
893 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
894 rx->queue, &(rx->skb));
895 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
896 ieee80211_has_protected(fc)) {
897 /* Store CCMP PN so that we can verify that the next
898 * fragment has a sequential PN value. */
899 entry->ccmp = 1;
900 memcpy(entry->last_pn,
901 rx->key->u.ccmp.rx_pn[rx->queue],
902 CCMP_PN_LEN);
903 }
904 return RX_QUEUED;
905 }
906
907 /* This is a fragment for a frame that should already be pending in
908 * the fragment cache. Add this fragment to the end of the pending entry.
909 */
910 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
911 if (!entry) {
912 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
913 return RX_DROP_MONITOR;
914 }
915
916 /* Verify that MPDUs within one MSDU have sequential PN values.
917 * (IEEE 802.11i, 8.3.3.4.5) */
918 if (entry->ccmp) {
919 int i;
920 u8 pn[CCMP_PN_LEN], *rpn;
921 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
922 return RX_DROP_UNUSABLE;
923 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
924 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
925 pn[i]++;
926 if (pn[i])
927 break;
928 }
929 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
930 if (memcmp(pn, rpn, CCMP_PN_LEN))
931 return RX_DROP_UNUSABLE;
932 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
933 }
934
935 skb_pull(rx->skb, ieee80211_hdrlen(fc));
936 __skb_queue_tail(&entry->skb_list, rx->skb);
937 entry->last_frag = frag;
938 entry->extra_len += rx->skb->len;
939 if (ieee80211_has_morefrags(fc)) {
940 rx->skb = NULL;
941 return RX_QUEUED;
942 }
943
944 rx->skb = __skb_dequeue(&entry->skb_list);
945 if (skb_tailroom(rx->skb) < entry->extra_len) {
946 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
947 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
948 GFP_ATOMIC))) {
949 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
950 __skb_queue_purge(&entry->skb_list);
951 return RX_DROP_UNUSABLE;
952 }
953 }
954 while ((skb = __skb_dequeue(&entry->skb_list))) {
955 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
956 dev_kfree_skb(skb);
957 }
958
959 /* Complete frame has been reassembled - process it now */
960 rx->flags |= IEEE80211_RX_FRAGMENTED;
961
962 out:
963 if (rx->sta)
964 rx->sta->rx_packets++;
965 if (is_multicast_ether_addr(hdr->addr1))
966 rx->local->dot11MulticastReceivedFrameCount++;
967 else
968 ieee80211_led_rx(rx->local);
969 return RX_CONTINUE;
970 }
971
972 static ieee80211_rx_result debug_noinline
973 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
974 {
975 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
976 struct sk_buff *skb;
977 int no_pending_pkts;
978 DECLARE_MAC_BUF(mac);
979 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
980
981 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
982 !(rx->flags & IEEE80211_RX_RA_MATCH)))
983 return RX_CONTINUE;
984
985 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
986 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
987 return RX_DROP_UNUSABLE;
988
989 skb = skb_dequeue(&rx->sta->tx_filtered);
990 if (!skb) {
991 skb = skb_dequeue(&rx->sta->ps_tx_buf);
992 if (skb)
993 rx->local->total_ps_buffered--;
994 }
995 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
996 skb_queue_empty(&rx->sta->ps_tx_buf);
997
998 if (skb) {
999 struct ieee80211_hdr *hdr =
1000 (struct ieee80211_hdr *) skb->data;
1001
1002 /*
1003 * Tell TX path to send one frame even though the STA may
1004 * still remain in PS mode after this frame exchange.
1005 */
1006 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1007
1008 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1009 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
1010 print_mac(mac, rx->sta->addr), rx->sta->aid,
1011 skb_queue_len(&rx->sta->ps_tx_buf));
1012 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1013
1014 /* Use MoreData flag to indicate whether there are more
1015 * buffered frames for this STA */
1016 if (no_pending_pkts)
1017 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1018 else
1019 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1020
1021 dev_queue_xmit(skb);
1022
1023 if (no_pending_pkts)
1024 sta_info_clear_tim_bit(rx->sta);
1025 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 } else if (!rx->sent_ps_buffered) {
1027 /*
1028 * FIXME: This can be the result of a race condition between
1029 * us expiring a frame and the station polling for it.
1030 * Should we send it a null-func frame indicating we
1031 * have nothing buffered for it?
1032 */
1033 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1034 "though there are no buffered frames for it\n",
1035 rx->dev->name, print_mac(mac, rx->sta->addr));
1036 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1037 }
1038
1039 /* Free PS Poll skb here instead of returning RX_DROP that would
1040 * count as a dropped frame. */
1041 dev_kfree_skb(rx->skb);
1042
1043 return RX_QUEUED;
1044 }
1045
1046 static ieee80211_rx_result debug_noinline
1047 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1048 {
1049 u8 *data = rx->skb->data;
1050 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1051
1052 if (!ieee80211_is_data_qos(hdr->frame_control))
1053 return RX_CONTINUE;
1054
1055 /* remove the qos control field, update frame type and meta-data */
1056 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1057 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1058 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1059 /* change frame type to non QOS */
1060 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1061
1062 return RX_CONTINUE;
1063 }
1064
1065 static int
1066 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1067 {
1068 if (unlikely(!rx->sta ||
1069 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1070 return -EACCES;
1071
1072 return 0;
1073 }
1074
1075 static int
1076 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1077 {
1078 /*
1079 * Pass through unencrypted frames if the hardware has
1080 * decrypted them already.
1081 */
1082 if (rx->status->flag & RX_FLAG_DECRYPTED)
1083 return 0;
1084
1085 /* Drop unencrypted frames if key is set. */
1086 if (unlikely(!ieee80211_has_protected(fc) &&
1087 !ieee80211_is_nullfunc(fc) &&
1088 (rx->key || rx->sdata->drop_unencrypted)))
1089 return -EACCES;
1090
1091 return 0;
1092 }
1093
1094 static int
1095 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1096 {
1097 struct net_device *dev = rx->dev;
1098 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1099 u16 hdrlen, ethertype;
1100 u8 *payload;
1101 u8 dst[ETH_ALEN];
1102 u8 src[ETH_ALEN] __aligned(2);
1103 struct sk_buff *skb = rx->skb;
1104 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1105 DECLARE_MAC_BUF(mac);
1106 DECLARE_MAC_BUF(mac2);
1107 DECLARE_MAC_BUF(mac3);
1108 DECLARE_MAC_BUF(mac4);
1109
1110 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1111 return -1;
1112
1113 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1114
1115 if (ieee80211_vif_is_mesh(&sdata->vif))
1116 hdrlen += ieee80211_get_mesh_hdrlen(
1117 (struct ieee80211s_hdr *) (skb->data + hdrlen));
1118
1119 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1120 * header
1121 * IEEE 802.11 address fields:
1122 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1123 * 0 0 DA SA BSSID n/a
1124 * 0 1 DA BSSID SA n/a
1125 * 1 0 BSSID SA DA n/a
1126 * 1 1 RA TA DA SA
1127 */
1128 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1129 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
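/*
 * Example of the address mapping above (illustrative): a data frame
 * sent by an AP to a station has FromDS=1, ToDS=0, so Addr1 is the
 * destination (the station), Addr2 the BSSID and Addr3 the original
 * source; ieee80211_get_DA()/ieee80211_get_SA() therefore return
 * Addr1 and Addr3, which become the Ethernet destination and source
 * in the conversion below.
 */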
1130
1131 switch (hdr->frame_control &
1132 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1133 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1134 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1135 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1136 return -1;
1137 break;
1138 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1139 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1140 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1141 return -1;
1142 break;
1143 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1144 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1145 (is_multicast_ether_addr(dst) &&
1146 !compare_ether_addr(src, dev->dev_addr)))
1147 return -1;
1148 break;
1149 case __constant_cpu_to_le16(0):
1150 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1151 return -1;
1152 break;
1153 }
1154
1155 if (unlikely(skb->len - hdrlen < 8))
1156 return -1;
1157
1158 payload = skb->data + hdrlen;
1159 ethertype = (payload[6] << 8) | payload[7];
1160
1161 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1162 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1163 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1164 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1165 * replace EtherType */
1166 skb_pull(skb, hdrlen + 6);
1167 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1168 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1169 } else {
1170 struct ethhdr *ehdr;
1171 __be16 len;
1172
1173 skb_pull(skb, hdrlen);
1174 len = htons(skb->len);
1175 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1176 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1177 memcpy(ehdr->h_source, src, ETH_ALEN);
1178 ehdr->h_proto = len;
1179 }
1180 return 0;
1181 }
1182
1183 /*
1184 * requires that rx->skb is a frame with an Ethernet header
1185 */
1186 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1187 {
1188 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1189 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1190 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1191
1192 /*
1193 * Allow EAPOL frames to us/the PAE group address regardless
1194 * of whether the frame was encrypted or not.
1195 */
1196 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1197 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1198 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1199 return true;
1200
1201 if (ieee80211_802_1x_port_control(rx) ||
1202 ieee80211_drop_unencrypted(rx, fc))
1203 return false;
1204
1205 return true;
1206 }
1207
1208 /*
1209 * requires that rx->skb is a frame with an Ethernet header
1210 */
1211 static void
1212 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1213 {
1214 struct net_device *dev = rx->dev;
1215 struct ieee80211_local *local = rx->local;
1216 struct sk_buff *skb, *xmit_skb;
1217 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1218 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1219 struct sta_info *dsta;
1220
1221 skb = rx->skb;
1222 xmit_skb = NULL;
1223
1224 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
1225 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
1226 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1227 if (is_multicast_ether_addr(ehdr->h_dest)) {
1228 /*
1229 * send multicast frames both to higher layers in
1230 * local net stack and back to the wireless medium
1231 */
1232 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1233 if (!xmit_skb && net_ratelimit())
1234 printk(KERN_DEBUG "%s: failed to clone "
1235 "multicast frame\n", dev->name);
1236 } else {
1237 dsta = sta_info_get(local, skb->data);
1238 if (dsta && dsta->sdata->dev == dev) {
1239 /*
1240 * The destination station is associated to
1241 * this AP (in this VLAN), so send the frame
1242 * directly to it and do not pass it to local
1243 * net stack.
1244 */
1245 xmit_skb = skb;
1246 skb = NULL;
1247 }
1248 }
1249 }
1250
1251 if (skb) {
1252 /* deliver to local stack */
1253 skb->protocol = eth_type_trans(skb, dev);
1254 memset(skb->cb, 0, sizeof(skb->cb));
1255 netif_rx(skb);
1256 }
1257
1258 if (xmit_skb) {
1259 /* send to wireless media */
1260 xmit_skb->protocol = htons(ETH_P_802_3);
1261 skb_reset_network_header(xmit_skb);
1262 skb_reset_mac_header(xmit_skb);
1263 dev_queue_xmit(xmit_skb);
1264 }
1265 }
1266
1267 static ieee80211_rx_result debug_noinline
1268 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1269 {
1270 struct net_device *dev = rx->dev;
1271 struct ieee80211_local *local = rx->local;
1272 u16 ethertype;
1273 u8 *payload;
1274 struct sk_buff *skb = rx->skb, *frame = NULL;
1275 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1276 __le16 fc = hdr->frame_control;
1277 const struct ethhdr *eth;
1278 int remaining, err;
1279 u8 dst[ETH_ALEN];
1280 u8 src[ETH_ALEN];
1281 DECLARE_MAC_BUF(mac);
1282
1283 if (unlikely(!ieee80211_is_data(fc)))
1284 return RX_CONTINUE;
1285
1286 if (unlikely(!ieee80211_is_data_present(fc)))
1287 return RX_DROP_MONITOR;
1288
1289 if (!(rx->flags & IEEE80211_RX_AMSDU))
1290 return RX_CONTINUE;
1291
1292 err = ieee80211_data_to_8023(rx);
1293 if (unlikely(err))
1294 return RX_DROP_UNUSABLE;
1295
1296 skb->dev = dev;
1297
1298 dev->stats.rx_packets++;
1299 dev->stats.rx_bytes += skb->len;
1300
1301 /* skip the wrapping header */
1302 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1303 if (!eth)
1304 return RX_DROP_UNUSABLE;
1305
1306 while (skb != frame) {
1307 u8 padding;
1308 __be16 len = eth->h_proto;
1309 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1310
1311 remaining = skb->len;
1312 memcpy(dst, eth->h_dest, ETH_ALEN);
1313 memcpy(src, eth->h_source, ETH_ALEN);
1314
1315 padding = ((4 - subframe_len) & 0x3);
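/* Illustrative: a subframe of 14 + 39 = 53 bytes needs 3 bytes of
 * padding so the next subframe starts on a four-byte boundary,
 * while a 60-byte subframe needs none. */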
1316 /* the last MSDU has no padding */
1317 if (subframe_len > remaining)
1318 return RX_DROP_UNUSABLE;
1319
1320 skb_pull(skb, sizeof(struct ethhdr));
1321 /* if last subframe reuse skb */
1322 if (remaining <= subframe_len + padding)
1323 frame = skb;
1324 else {
1325 frame = dev_alloc_skb(local->hw.extra_tx_headroom +
1326 subframe_len);
1327
1328 if (frame == NULL)
1329 return RX_DROP_UNUSABLE;
1330
1331 skb_reserve(frame, local->hw.extra_tx_headroom +
1332 sizeof(struct ethhdr));
1333 memcpy(skb_put(frame, ntohs(len)), skb->data,
1334 ntohs(len));
1335
1336 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1337 padding);
1338 if (!eth) {
1339 dev_kfree_skb(frame);
1340 return RX_DROP_UNUSABLE;
1341 }
1342 }
1343
1344 skb_reset_network_header(frame);
1345 frame->dev = dev;
1346 frame->priority = skb->priority;
1347 rx->skb = frame;
1348
1349 payload = frame->data;
1350 ethertype = (payload[6] << 8) | payload[7];
1351
1352 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1353 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1354 compare_ether_addr(payload,
1355 bridge_tunnel_header) == 0)) {
1356 /* remove RFC1042 or Bridge-Tunnel
1357 * encapsulation and replace EtherType */
1358 skb_pull(frame, 6);
1359 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1360 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1361 } else {
1362 memcpy(skb_push(frame, sizeof(__be16)),
1363 &len, sizeof(__be16));
1364 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1365 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1366 }
1367
1368 if (!ieee80211_frame_allowed(rx, fc)) {
1369 if (skb == frame) /* last frame */
1370 return RX_DROP_UNUSABLE;
1371 dev_kfree_skb(frame);
1372 continue;
1373 }
1374
1375 ieee80211_deliver_skb(rx);
1376 }
1377
1378 return RX_QUEUED;
1379 }
1380
1381 static ieee80211_rx_result debug_noinline
1382 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1383 {
1384 struct ieee80211_hdr *hdr;
1385 struct ieee80211s_hdr *mesh_hdr;
1386 unsigned int hdrlen;
1387 struct sk_buff *skb = rx->skb, *fwd_skb;
1388
1389 hdr = (struct ieee80211_hdr *) skb->data;
1390 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1391 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1392
1393 if (!ieee80211_is_data(hdr->frame_control))
1394 return RX_CONTINUE;
1395
1396 if (!mesh_hdr->ttl)
1397 /* illegal frame */
1398 return RX_DROP_MONITOR;
1399
1400 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1401 return RX_CONTINUE;
1402
1403 mesh_hdr->ttl--;
1404
1405 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1406 if (!mesh_hdr->ttl)
1407 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta,
1408 dropped_frames_ttl);
1409 else {
1410 struct ieee80211_hdr *fwd_hdr;
1411 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1412
1413 if (!fwd_skb && net_ratelimit())
1414 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1415 rx->dev->name);
1416
1417 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1418 /*
1419 * Save TA to addr1 to send TA a path error if a
1420 * suitable next hop is not found
1421 */
1422 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1423 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1424 fwd_skb->dev = rx->local->mdev;
1425 fwd_skb->iif = rx->dev->ifindex;
1426 dev_queue_xmit(fwd_skb);
1427 }
1428 }
1429
1430 if (is_multicast_ether_addr(hdr->addr3) ||
1431 rx->dev->flags & IFF_PROMISC)
1432 return RX_CONTINUE;
1433 else
1434 return RX_DROP_MONITOR;
1435 }
1436
1437
1438 static ieee80211_rx_result debug_noinline
1439 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1440 {
1441 struct net_device *dev = rx->dev;
1442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1443 __le16 fc = hdr->frame_control;
1444 int err;
1445
1446 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1447 return RX_CONTINUE;
1448
1449 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1450 return RX_DROP_MONITOR;
1451
1452 err = ieee80211_data_to_8023(rx);
1453 if (unlikely(err))
1454 return RX_DROP_UNUSABLE;
1455
1456 if (!ieee80211_frame_allowed(rx, fc))
1457 return RX_DROP_MONITOR;
1458
1459 rx->skb->dev = dev;
1460
1461 dev->stats.rx_packets++;
1462 dev->stats.rx_bytes += rx->skb->len;
1463
1464 ieee80211_deliver_skb(rx);
1465
1466 return RX_QUEUED;
1467 }
1468
1469 static ieee80211_rx_result debug_noinline
1470 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1471 {
1472 struct ieee80211_local *local = rx->local;
1473 struct ieee80211_hw *hw = &local->hw;
1474 struct sk_buff *skb = rx->skb;
1475 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1476 struct tid_ampdu_rx *tid_agg_rx;
1477 u16 start_seq_num;
1478 u16 tid;
1479
1480 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1481 return RX_CONTINUE;
1482
1483 if (ieee80211_is_back_req(bar->frame_control)) {
1484 if (!rx->sta)
1485 return RX_CONTINUE;
1486 tid = le16_to_cpu(bar->control) >> 12;
1487 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1488 != HT_AGG_STATE_OPERATIONAL)
1489 return RX_CONTINUE;
1490 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1491
1492 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1493
1494 /* reset session timer */
1495 if (tid_agg_rx->timeout) {
1496 unsigned long expires =
1497 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
1498 mod_timer(&tid_agg_rx->session_timer, expires);
1499 }
1500
1501 /* manage reordering buffer according to requested */
1502 /* sequence number */
1503 rcu_read_lock();
1504 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1505 start_seq_num, 1);
1506 rcu_read_unlock();
1507 return RX_DROP_UNUSABLE;
1508 }
1509
1510 return RX_CONTINUE;
1511 }
1512
1513 static ieee80211_rx_result debug_noinline
1514 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1515 {
1516 struct ieee80211_sub_if_data *sdata;
1517
1518 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1519 return RX_DROP_MONITOR;
1520
1521 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1522 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
1523 sdata->vif.type == IEEE80211_IF_TYPE_IBSS ||
1524 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) &&
1525 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
1526 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1527 else
1528 return RX_DROP_MONITOR;
1529
1530 return RX_QUEUED;
1531 }
1532
1533 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1534 struct ieee80211_hdr *hdr,
1535 struct ieee80211_rx_data *rx)
1536 {
1537 int keyidx;
1538 unsigned int hdrlen;
1539 DECLARE_MAC_BUF(mac);
1540 DECLARE_MAC_BUF(mac2);
1541
1542 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1543 if (rx->skb->len >= hdrlen + 4)
1544 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1545 else
1546 keyidx = -1;
1547
1548 if (!rx->sta) {
1549 /*
1550 * Some hardware seem to generate incorrect Michael MIC
1551 * reports; ignore them to avoid triggering countermeasures.
1552 */
1553 goto ignore;
1554 }
1555
1556 if (!ieee80211_has_protected(hdr->frame_control))
1557 goto ignore;
1558
1559 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
1560 /*
1561 * APs with pairwise keys should never receive Michael MIC
1562 * errors for non-zero keyidx because these are reserved for
1563 * group keys and only the AP is sending real multicast
1564 * frames in the BSS.
1565 */
1566 goto ignore;
1567 }
1568
1569 if (!ieee80211_is_data(hdr->frame_control) &&
1570 !ieee80211_is_auth(hdr->frame_control))
1571 goto ignore;
1572
1573 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1574 ignore:
1575 dev_kfree_skb(rx->skb);
1576 rx->skb = NULL;
1577 }
1578
1579 /* TODO: use IEEE80211_RX_FRAGMENTED */
1580 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1581 {
1582 struct ieee80211_sub_if_data *sdata;
1583 struct ieee80211_local *local = rx->local;
1584 struct ieee80211_rtap_hdr {
1585 struct ieee80211_radiotap_header hdr;
1586 u8 flags;
1587 u8 rate;
1588 __le16 chan_freq;
1589 __le16 chan_flags;
1590 } __attribute__ ((packed)) *rthdr;
1591 struct sk_buff *skb = rx->skb, *skb2;
1592 struct net_device *prev_dev = NULL;
1593 struct ieee80211_rx_status *status = rx->status;
1594
1595 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1596 goto out_free_skb;
1597
1598 if (skb_headroom(skb) < sizeof(*rthdr) &&
1599 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1600 goto out_free_skb;
1601
1602 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1603 memset(rthdr, 0, sizeof(*rthdr));
1604 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1605 rthdr->hdr.it_present =
1606 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1607 (1 << IEEE80211_RADIOTAP_RATE) |
1608 (1 << IEEE80211_RADIOTAP_CHANNEL));
1609
1610 rthdr->rate = rx->rate->bitrate / 5;
1611 rthdr->chan_freq = cpu_to_le16(status->freq);
1612
1613 if (status->band == IEEE80211_BAND_5GHZ)
1614 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1615 IEEE80211_CHAN_5GHZ);
1616 else
1617 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1618 IEEE80211_CHAN_2GHZ);
1619
1620 skb_set_mac_header(skb, 0);
1621 skb->ip_summed = CHECKSUM_UNNECESSARY;
1622 skb->pkt_type = PACKET_OTHERHOST;
1623 skb->protocol = htons(ETH_P_802_2);
1624
1625 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1626 if (!netif_running(sdata->dev))
1627 continue;
1628
1629 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
1630 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1631 continue;
1632
1633 if (prev_dev) {
1634 skb2 = skb_clone(skb, GFP_ATOMIC);
1635 if (skb2) {
1636 skb2->dev = prev_dev;
1637 netif_rx(skb2);
1638 }
1639 }
1640
1641 prev_dev = sdata->dev;
1642 sdata->dev->stats.rx_packets++;
1643 sdata->dev->stats.rx_bytes += skb->len;
1644 }
1645
1646 if (prev_dev) {
1647 skb->dev = prev_dev;
1648 netif_rx(skb);
1649 skb = NULL;
1650 } else
1651 goto out_free_skb;
1652
1653 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1654 return;
1655
1656 out_free_skb:
1657 dev_kfree_skb(skb);
1658 }
1659
1660
1661 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1662 struct ieee80211_rx_data *rx,
1663 struct sk_buff *skb)
1664 {
1665 ieee80211_rx_result res = RX_DROP_MONITOR;
1666
1667 rx->skb = skb;
1668 rx->sdata = sdata;
1669 rx->dev = sdata->dev;
1670
1671 #define CALL_RXH(rxh) \
1672 do { \
1673 res = rxh(rx); \
1674 if (res != RX_CONTINUE) \
1675 goto rxh_done; \
1676 } while (0);
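/*
 * For example, CALL_RXH(ieee80211_rx_h_decrypt) runs the decrypt
 * handler and only falls through to the next CALL_RXH when it returns
 * RX_CONTINUE; any other result (RX_QUEUED, RX_DROP_MONITOR,
 * RX_DROP_UNUSABLE) jumps straight to rxh_done for accounting.
 */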
1677
1678 CALL_RXH(ieee80211_rx_h_passive_scan)
1679 CALL_RXH(ieee80211_rx_h_check)
1680 CALL_RXH(ieee80211_rx_h_decrypt)
1681 CALL_RXH(ieee80211_rx_h_sta_process)
1682 CALL_RXH(ieee80211_rx_h_defragment)
1683 CALL_RXH(ieee80211_rx_h_ps_poll)
1684 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1685 /* must be after MMIC verify so header is counted in MPDU mic */
1686 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1687 CALL_RXH(ieee80211_rx_h_amsdu)
1688 if (ieee80211_vif_is_mesh(&sdata->vif))
1689 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1690 CALL_RXH(ieee80211_rx_h_data)
1691 CALL_RXH(ieee80211_rx_h_ctrl)
1692 CALL_RXH(ieee80211_rx_h_mgmt)
1693
1694 #undef CALL_RXH
1695
1696 rxh_done:
1697 switch (res) {
1698 case RX_DROP_MONITOR:
1699 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1700 if (rx->sta)
1701 rx->sta->rx_dropped++;
1702 /* fall through */
1703 case RX_CONTINUE:
1704 ieee80211_rx_cooked_monitor(rx);
1705 break;
1706 case RX_DROP_UNUSABLE:
1707 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1708 if (rx->sta)
1709 rx->sta->rx_dropped++;
1710 dev_kfree_skb(rx->skb);
1711 break;
1712 case RX_QUEUED:
1713 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1714 break;
1715 }
1716 }
1717
1718 /* main receive path */
1719
1720 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1721 u8 *bssid, struct ieee80211_rx_data *rx,
1722 struct ieee80211_hdr *hdr)
1723 {
1724 int multicast = is_multicast_ether_addr(hdr->addr1);
1725
1726 switch (sdata->vif.type) {
1727 case IEEE80211_IF_TYPE_STA:
1728 if (!bssid)
1729 return 0;
1730 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1731 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1732 return 0;
1733 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1734 } else if (!multicast &&
1735 compare_ether_addr(sdata->dev->dev_addr,
1736 hdr->addr1) != 0) {
1737 if (!(sdata->dev->flags & IFF_PROMISC))
1738 return 0;
1739 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1740 }
1741 break;
1742 case IEEE80211_IF_TYPE_IBSS:
1743 if (!bssid)
1744 return 0;
1745 if (ieee80211_is_beacon(hdr->frame_control)) {
1746 if (!rx->sta)
1747 rx->sta = ieee80211_ibss_add_sta(sdata,
1748 rx->skb, bssid, hdr->addr2,
1749 BIT(rx->status->rate_idx));
1750 return 1;
1751 }
1752 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1753 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1754 return 0;
1755 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1756 } else if (!multicast &&
1757 compare_ether_addr(sdata->dev->dev_addr,
1758 hdr->addr1) != 0) {
1759 if (!(sdata->dev->flags & IFF_PROMISC))
1760 return 0;
1761 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1762 } else if (!rx->sta)
1763 rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
1764 bssid, hdr->addr2,
1765 BIT(rx->status->rate_idx));
1766 break;
1767 case IEEE80211_IF_TYPE_MESH_POINT:
1768 if (!multicast &&
1769 compare_ether_addr(sdata->dev->dev_addr,
1770 hdr->addr1) != 0) {
1771 if (!(sdata->dev->flags & IFF_PROMISC))
1772 return 0;
1773
1774 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1775 }
1776 break;
1777 case IEEE80211_IF_TYPE_VLAN:
1778 case IEEE80211_IF_TYPE_AP:
1779 if (!bssid) {
1780 if (compare_ether_addr(sdata->dev->dev_addr,
1781 hdr->addr1))
1782 return 0;
1783 } else if (!ieee80211_bssid_match(bssid,
1784 sdata->dev->dev_addr)) {
1785 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1786 return 0;
1787 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1788 }
1789 break;
1790 case IEEE80211_IF_TYPE_WDS:
1791 if (bssid || !ieee80211_is_data(hdr->frame_control))
1792 return 0;
1793 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1794 return 0;
1795 break;
1796 case IEEE80211_IF_TYPE_MNTR:
1797 /* take everything */
1798 break;
1799 case IEEE80211_IF_TYPE_INVALID:
1800 /* should never get here */
1801 WARN_ON(1);
1802 break;
1803 }
1804
1805 return 1;
1806 }
1807
1808 /*
1809 * This is the actual Rx frame handler. As it belongs to the Rx path it must
1810 * be called with rcu_read_lock protection.
1811 */
1812 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1813 struct sk_buff *skb,
1814 struct ieee80211_rx_status *status,
1815 struct ieee80211_rate *rate)
1816 {
1817 struct ieee80211_local *local = hw_to_local(hw);
1818 struct ieee80211_sub_if_data *sdata;
1819 struct ieee80211_hdr *hdr;
1820 struct ieee80211_rx_data rx;
1821 int prepares;
1822 struct ieee80211_sub_if_data *prev = NULL;
1823 struct sk_buff *skb_new;
1824 u8 *bssid;
1825
1826 hdr = (struct ieee80211_hdr *)skb->data;
1827 memset(&rx, 0, sizeof(rx));
1828 rx.skb = skb;
1829 rx.local = local;
1830
1831 rx.status = status;
1832 rx.rate = rate;
1833
1834 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
1835 local->dot11ReceivedFragmentCount++;
1836
1837 rx.sta = sta_info_get(local, hdr->addr2);
1838 if (rx.sta) {
1839 rx.sdata = rx.sta->sdata;
1840 rx.dev = rx.sta->sdata->dev;
1841 }
1842
1843 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
1844 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
1845 return;
1846 }
1847
1848 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
1849 rx.flags |= IEEE80211_RX_IN_SCAN;
1850
1851 ieee80211_parse_qos(&rx);
1852 ieee80211_verify_ip_alignment(&rx);
1853
1854 skb = rx.skb;
1855
1856 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1857 if (!netif_running(sdata->dev))
1858 continue;
1859
1860 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR)
1861 continue;
1862
1863 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1864 rx.flags |= IEEE80211_RX_RA_MATCH;
1865 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
1866
1867 if (!prepares)
1868 continue;
1869
1870 /*
1871 * frame is destined for this interface, but if it's not
1872 * also for the previous one we handle that after the
1873 * loop to avoid copying the SKB once too much
1874 */
1875
1876 if (!prev) {
1877 prev = sdata;
1878 continue;
1879 }
1880
1881 /*
1882 * frame was destined for the previous interface
1883 * so invoke RX handlers for it
1884 */
1885
1886 skb_new = skb_copy(skb, GFP_ATOMIC);
1887 if (!skb_new) {
1888 if (net_ratelimit())
1889 printk(KERN_DEBUG "%s: failed to copy "
1890 "multicast frame for %s\n",
1891 wiphy_name(local->hw.wiphy),
1892 prev->dev->name);
1893 continue;
1894 }
1895 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1896 prev = sdata;
1897 }
1898 if (prev)
1899 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1900 else
1901 dev_kfree_skb(skb);
1902 }
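
The interface loop above defers the first matching interface and only duplicates the skb once a second match shows up, so the common single-interface case costs no copy at all. A small userspace sketch of that pattern, assuming hypothetical deliver() and fan_out() helpers in place of ieee80211_invoke_rx_handlers() and the sdata list walk (and pretending every listed interface matches):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stands in for ieee80211_invoke_rx_handlers(): takes ownership of buf */
static void deliver(const char *iface, char *buf)
{
        printf("%s <- %s\n", iface, buf);
        free(buf);
}

static void fan_out(const char *ifaces[], int n, char *buf)
{
        const char *prev = NULL;
        char *copy;
        int i;

        for (i = 0; i < n; i++) {
                if (!prev) {
                        prev = ifaces[i];       /* remember the first match */
                        continue;
                }
                copy = strdup(buf);             /* a later match exists, so the */
                if (copy)                       /* earlier one gets a copy */
                        deliver(prev, copy);
                prev = ifaces[i];
        }

        if (prev)
                deliver(prev, buf);             /* last match consumes the original */
        else
                free(buf);                      /* nobody matched: just free it */
}

int main(void)
{
        const char *ifaces[] = { "wlan0", "wlan1" };

        fan_out(ifaces, 2, strdup("beacon"));   /* one copy plus the original */
        fan_out(ifaces, 0, strdup("ignored"));  /* freed without delivery */
        return 0;
}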
1903
1904 #define SEQ_MODULO 0x1000
1905 #define SEQ_MASK 0xfff
1906
1907 static inline int seq_less(u16 sq1, u16 sq2)
1908 {
1909 return (((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1));
1910 }
1911
1912 static inline u16 seq_inc(u16 sq)
1913 {
1914 return ((sq + 1) & SEQ_MASK);
1915 }
1916
1917 static inline u16 seq_sub(u16 sq1, u16 sq2)
1918 {
1919 return ((sq1 - sq2) & SEQ_MASK);
1920 }
1921
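These helpers implement modulo-4096 arithmetic on 802.11 sequence numbers, so "less than" is decided within a window of half the sequence space and wraps cleanly past 0xfff. A standalone sketch with renamed copies of the helpers, assuming the same SEQ_MODULO/SEQ_MASK values, to show the wraparound:

#include <assert.h>
#include <stdint.h>

#define EX_SEQ_MODULO 0x1000
#define EX_SEQ_MASK   0xfff

static int ex_seq_less(uint16_t sq1, uint16_t sq2)
{
        return ((sq1 - sq2) & EX_SEQ_MASK) > (EX_SEQ_MODULO >> 1);
}

static uint16_t ex_seq_inc(uint16_t sq)
{
        return (sq + 1) & EX_SEQ_MASK;
}

int main(void)
{
        assert(ex_seq_less(10, 20));     /* 10 is older than 20 */
        assert(ex_seq_less(4090, 5));    /* 4090 wrapped, so it is older than 5 */
        assert(!ex_seq_less(5, 4090));   /* ...and 5 is newer than 4090 */
        assert(ex_seq_inc(0xfff) == 0);  /* increment wraps at SEQ_MODULO */
        return 0;
}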
1922
1923 /*
1924 * As this function belongs to the Rx path it must be called with
1925 * the proper rcu_read_lock protection for its flow.
1926 */
1927 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1928 struct tid_ampdu_rx *tid_agg_rx,
1929 struct sk_buff *skb, u16 mpdu_seq_num,
1930 int bar_req)
1931 {
1932 struct ieee80211_local *local = hw_to_local(hw);
1933 struct ieee80211_rx_status status;
1934 u16 head_seq_num, buf_size;
1935 int index;
1936 struct ieee80211_supported_band *sband;
1937 struct ieee80211_rate *rate;
1938
1939 buf_size = tid_agg_rx->buf_size;
1940 head_seq_num = tid_agg_rx->head_seq_num;
1941
1942 /* frame with an out-of-date sequence number */
1943 if (seq_less(mpdu_seq_num, head_seq_num)) {
1944 dev_kfree_skb(skb);
1945 return 1;
1946 }
1947
1948 /* if the frame sequence number exceeds our buffering window size or
1949 * a Block Ack Request arrived - release stored frames */
1950 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
1951 /* new head to the ordering buffer */
1952 if (bar_req)
1953 head_seq_num = mpdu_seq_num;
1954 else
1955 head_seq_num =
1956 seq_inc(seq_sub(mpdu_seq_num, buf_size));
1957 /* release stored frames up to new head to stack */
1958 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1959 index = seq_sub(tid_agg_rx->head_seq_num,
1960 tid_agg_rx->ssn)
1961 % tid_agg_rx->buf_size;
1962
1963 if (tid_agg_rx->reorder_buf[index]) {
1964 /* release the reordered frames to the stack */
1965 memcpy(&status,
1966 tid_agg_rx->reorder_buf[index]->cb,
1967 sizeof(status));
1968 sband = local->hw.wiphy->bands[status.band];
1969 rate = &sband->bitrates[status.rate_idx];
1970 __ieee80211_rx_handle_packet(hw,
1971 tid_agg_rx->reorder_buf[index],
1972 &status, rate);
1973 tid_agg_rx->stored_mpdu_num--;
1974 tid_agg_rx->reorder_buf[index] = NULL;
1975 }
1976 tid_agg_rx->head_seq_num =
1977 seq_inc(tid_agg_rx->head_seq_num);
1978 }
1979 if (bar_req)
1980 return 1;
1981 }
1982
1983 /* now the new frame is always in the range of the
1984 * reordering buffer window */
1985 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
1986 % tid_agg_rx->buf_size;
1987 /* check if we already stored this frame */
1988 if (tid_agg_rx->reorder_buf[index]) {
1989 dev_kfree_skb(skb);
1990 return 1;
1991 }
1992
1993 /* if the arriving mpdu is in order and nothing else is stored,
1994 * release it immediately */
1995 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1996 tid_agg_rx->stored_mpdu_num == 0) {
1997 tid_agg_rx->head_seq_num =
1998 seq_inc(tid_agg_rx->head_seq_num);
1999 return 0;
2000 }
2001
2002 /* put the frame in the reordering buffer */
2003 tid_agg_rx->reorder_buf[index] = skb;
2004 tid_agg_rx->stored_mpdu_num++;
2005 /* release in-order frames from the buffer up to the next missing frame */
2006 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2007 % tid_agg_rx->buf_size;
2008 while (tid_agg_rx->reorder_buf[index]) {
2009 /* release the reordered frame back to the stack */
2010 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
2011 sizeof(status));
2012 sband = local->hw.wiphy->bands[status.band];
2013 rate = &sband->bitrates[status.rate_idx];
2014 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2015 &status, rate);
2016 tid_agg_rx->stored_mpdu_num--;
2017 tid_agg_rx->reorder_buf[index] = NULL;
2018 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2019 index = seq_sub(tid_agg_rx->head_seq_num,
2020 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2021 }
2022 return 1;
2023 }
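
A compressed userspace model of the reordering flow above, assuming a tiny window and string payloads: stale frames are dropped, a frame landing beyond the window slides the head forward (flushing whatever it skips), out-of-order frames are parked in the buffer, and every consecutive frame starting at the head is released. The names (reorder_rx, deliver, WIN) are made up for the sketch, and the original's fast path, which returns the in-order, empty-buffer frame to the caller instead of buffering it, is folded into the final flush loop:

#include <stdio.h>

#define WIN  4                        /* stands in for tid_agg_rx->buf_size */
#define MASK 0xfff

static const char *slot[WIN];         /* reorder buffer; NULL = empty */
static unsigned head;                 /* head_seq_num: next frame to release */
static unsigned ssn;                  /* starting sequence number */

static int less(unsigned a, unsigned b)
{
        return ((a - b) & MASK) > 0x800;
}

static void deliver(unsigned seq, const char *f)
{
        printf("deliver %u (%s)\n", seq, f);
}

static void reorder_rx(unsigned seq, const char *f)
{
        unsigned idx;

        if (less(seq, head))
                return;                               /* out of date: drop */

        if (!less(seq, (head + WIN) & MASK)) {        /* beyond the window */
                unsigned new_head = (seq - WIN + 1) & MASK;

                while (less(head, new_head)) {        /* flush skipped slots */
                        idx = ((head - ssn) & MASK) % WIN;
                        if (slot[idx]) {
                                deliver(head, slot[idx]);
                                slot[idx] = NULL;
                        }
                        head = (head + 1) & MASK;
                }
        }

        idx = ((seq - ssn) & MASK) % WIN;
        if (slot[idx])
                return;                               /* duplicate: drop */
        slot[idx] = f;

        /* release every consecutive frame now sitting at the head */
        idx = ((head - ssn) & MASK) % WIN;
        while (slot[idx]) {
                deliver(head, slot[idx]);
                slot[idx] = NULL;
                head = (head + 1) & MASK;
                idx = ((head - ssn) & MASK) % WIN;
        }
}

int main(void)
{
        reorder_rx(1, "B");   /* hole at 0: B is buffered */
        reorder_rx(0, "A");   /* hole filled: A then B are released */
        reorder_rx(6, "G");   /* beyond the window: head slides to 3, G buffered */
        return 0;
}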
2024
2025 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2026 struct sk_buff *skb)
2027 {
2028 struct ieee80211_hw *hw = &local->hw;
2029 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2030 struct sta_info *sta;
2031 struct tid_ampdu_rx *tid_agg_rx;
2032 u16 sc;
2033 u16 mpdu_seq_num;
2034 u8 ret = 0;
2035 int tid;
2036
2037 sta = sta_info_get(local, hdr->addr2);
2038 if (!sta)
2039 return ret;
2040
2041 /* filter the QoS data rx stream according to STA/TID and check
2042 * if this STA/TID has an active aggregation session */
2043 if (!ieee80211_is_data_qos(hdr->frame_control))
2044 goto end_reorder;
2045
2046 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2047
2048 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2049 goto end_reorder;
2050
2051 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2052
2053 /* qos null data frames are excluded */
2054 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2055 goto end_reorder;
2056
2057 /* new un-ordered ampdu frame - process it */
2058
2059 /* reset session timer */
2060 if (tid_agg_rx->timeout) {
2061 unsigned long expires =
2062 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
2063 mod_timer(&tid_agg_rx->session_timer, expires);
2064 }
2065
2066 /* if this mpdu is fragmented - terminate rx aggregation session */
2067 sc = le16_to_cpu(hdr->seq_ctrl);
2068 if (sc & IEEE80211_SCTL_FRAG) {
2069 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr,
2070 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2071 ret = 1;
2072 goto end_reorder;
2073 }
2074
2075 /* hand the mpdu to the reordering buffer according to its sequence number */
2076 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2077 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2078 mpdu_seq_num, 0);
2079 end_reorder:
2080 return ret;
2081 }
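
The TID and the MPDU sequence number above come out of two 16-bit fields: the low four bits of the QoS control carry the TID, while the sequence control packs a 4-bit fragment number in its low bits and the 12-bit sequence number above it (hence the >> 4). A quick standalone check, with the mask values restated locally as an assumption rather than taken from the kernel headers:

#include <assert.h>
#include <stdint.h>

#define EX_SCTL_FRAG 0x000F   /* assumed layout: bits 0-3 fragment number */
#define EX_SCTL_SEQ  0xFFF0   /* bits 4-15 sequence number */
#define EX_QOS_TID   0x000F   /* QoS control bits 0-3: TID */

int main(void)
{
        uint16_t seq_ctrl = (2047 << 4) | 2;     /* sequence 2047, fragment 2 */
        uint8_t qos_ctl = 0x06;                  /* TID 6 */

        assert(((seq_ctrl & EX_SCTL_SEQ) >> 4) == 2047);
        assert((seq_ctrl & EX_SCTL_FRAG) == 2);  /* fragmented: aggregation stops */
        assert((qos_ctl & EX_QOS_TID) == 6);
        return 0;
}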
2082
2083 /*
2084 * This is the receive path handler. It is called by a low level driver when an
2085 * 802.11 MPDU is received from the hardware.
2086 */
2087 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2088 struct ieee80211_rx_status *status)
2089 {
2090 struct ieee80211_local *local = hw_to_local(hw);
2091 struct ieee80211_rate *rate = NULL;
2092 struct ieee80211_supported_band *sband;
2093
2094 if (status->band < 0 ||
2095 status->band >= IEEE80211_NUM_BANDS) {
2096 WARN_ON(1);
2097 return;
2098 }
2099
2100 sband = local->hw.wiphy->bands[status->band];
2101
2102 if (!sband ||
2103 status->rate_idx < 0 ||
2104 status->rate_idx >= sband->n_bitrates) {
2105 WARN_ON(1);
2106 return;
2107 }
2108
2109 rate = &sband->bitrates[status->rate_idx];
2110
2111 /*
2112 * key references and virtual interfaces are protected using RCU
2113 * and this requires that we are in a read-side RCU section during
2114 * receive processing
2115 */
2116 rcu_read_lock();
2117
2118 /*
2119 * Frames with failed FCS/PLCP checksum are not returned,
2120 * all other frames are returned without radiotap header
2121 * if it was previously present.
2122 * Also, frames with less than 16 bytes are dropped.
2123 */
2124 skb = ieee80211_rx_monitor(local, skb, status, rate);
2125 if (!skb) {
2126 rcu_read_unlock();
2127 return;
2128 }
2129
2130 if (!ieee80211_rx_reorder_ampdu(local, skb))
2131 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2132
2133 rcu_read_unlock();
2134 }
2135 EXPORT_SYMBOL(__ieee80211_rx);
2136
2137 /* This is a version of the rx handler that can be called from hard irq
2138 * context. Post the skb on the queue and schedule the tasklet */
2139 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2140 struct ieee80211_rx_status *status)
2141 {
2142 struct ieee80211_local *local = hw_to_local(hw);
2143
2144 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2145
2146 skb->dev = local->mdev;
2147 /* copy status into skb->cb for use by tasklet */
2148 memcpy(skb->cb, status, sizeof(*status));
2149 skb->pkt_type = IEEE80211_RX_MSG;
2150 skb_queue_tail(&local->skb_queue, skb);
2151 tasklet_schedule(&local->tasklet);
2152 }
2153 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
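
For drivers, the difference between the two entry points is only the calling context: __ieee80211_rx() runs the full receive path and is meant for tasklet/softirq context, while ieee80211_rx_irqsafe() may be called straight from a hard interrupt handler because it only queues the skb for the tasklet. A hedged sketch of a hypothetical driver ISR using the irqsafe variant; mydrv_priv, mydrv_fetch_skb() and mydrv_fcs_failed() are invented names, and only status fields that appear in this file are filled in:

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
        struct mydrv_priv *priv = dev_id;              /* hypothetical driver state */
        struct sk_buff *skb = mydrv_fetch_skb(priv);   /* hypothetical DMA unload */
        struct ieee80211_rx_status status;

        if (!skb)
                return IRQ_NONE;

        memset(&status, 0, sizeof(status));
        status.band = IEEE80211_BAND_2GHZ;             /* band the frame was received on */
        status.rate_idx = 0;                           /* index into that band's bitrates */
        if (mydrv_fcs_failed(priv))                    /* hypothetical hardware flag */
                status.flag |= RX_FLAG_FAILED_FCS_CRC;

        /* hard-irq context: queue the frame instead of calling __ieee80211_rx() */
        ieee80211_rx_irqsafe(priv->hw, skb, &status);
        return IRQ_HANDLED;
}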