mac80211: use RCU for TX aggregation
net/mac80211/tx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 *
12 * Transmit and frame generation functions.
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/etherdevice.h>
19 #include <linux/bitmap.h>
20 #include <linux/rcupdate.h>
21 #include <net/net_namespace.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <net/cfg80211.h>
24 #include <net/mac80211.h>
25 #include <asm/unaligned.h>
26
27 #include "ieee80211_i.h"
28 #include "driver-ops.h"
29 #include "led.h"
30 #include "mesh.h"
31 #include "wep.h"
32 #include "wpa.h"
33 #include "wme.h"
34 #include "rate.h"
35
36 #define IEEE80211_TX_OK 0
37 #define IEEE80211_TX_AGAIN 1
38 #define IEEE80211_TX_PENDING 2
39
40 /* misc utils */
41
42 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
43 int next_frag_len)
44 {
45 int rate, mrate, erp, dur, i;
46 struct ieee80211_rate *txrate;
47 struct ieee80211_local *local = tx->local;
48 struct ieee80211_supported_band *sband;
49 struct ieee80211_hdr *hdr;
50 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
51
52 /* assume HW handles this */
53 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
54 return 0;
55
56 /* uh huh? */
57 if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
58 return 0;
59
60 sband = local->hw.wiphy->bands[tx->channel->band];
61 txrate = &sband->bitrates[info->control.rates[0].idx];
62
63 erp = txrate->flags & IEEE80211_RATE_ERP_G;
64
65 /*
66 * data and mgmt (except PS Poll):
67 * - during CFP: 32768
68 * - during contention period:
69 * if addr1 is group address: 0
70 * if more fragments = 0 and addr1 is individual address: time to
71 * transmit one ACK plus SIFS
72 * if more fragments = 1 and addr1 is individual address: time to
73 * transmit next fragment plus 2 x ACK plus 3 x SIFS
74 *
75 * IEEE 802.11, 9.6:
76 * - control response frame (CTS or ACK) shall be transmitted using the
77 * same rate as the immediately previous frame in the frame exchange
78 * sequence, if this rate belongs to the PHY mandatory rates, or else
79 * at the highest possible rate belonging to the PHY rates in the
80 * BSSBasicRateSet
81 */
82 hdr = (struct ieee80211_hdr *)tx->skb->data;
83 if (ieee80211_is_ctl(hdr->frame_control)) {
84 /* TODO: These control frames are not currently sent by
85 * mac80211, but should they be implemented, this function
86 * needs to be updated to support duration field calculation.
87 *
88 * RTS: time needed to transmit pending data/mgmt frame plus
89 * one CTS frame plus one ACK frame plus 3 x SIFS
90 * CTS: duration of immediately previous RTS minus time
91 * required to transmit CTS and its SIFS
92 * ACK: 0 if immediately previous directed data/mgmt had
93 * more=0, with more=1 duration in ACK frame is duration
94 * from previous frame minus time needed to transmit ACK
95 * and its SIFS
96 * PS Poll: BIT(15) | BIT(14) | aid
97 */
98 return 0;
99 }
100
101 /* data/mgmt */
102 if (0 /* FIX: data/mgmt during CFP */)
103 return cpu_to_le16(32768);
104
105 if (group_addr) /* Group address as the destination - no ACK */
106 return 0;
107
108 /* Individual destination address:
109 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
110 * CTS and ACK frames shall be transmitted using the highest rate in
111 * basic rate set that is less than or equal to the rate of the
112 * immediately previous frame and that is using the same modulation
113 * (CCK or OFDM). If no basic rate set matches with these requirements,
114 * the highest mandatory rate of the PHY that is less than or equal to
115 * the rate of the previous frame is used.
116 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
117 */
118 rate = -1;
119 /* use lowest available if everything fails */
120 mrate = sband->bitrates[0].bitrate;
121 for (i = 0; i < sband->n_bitrates; i++) {
122 struct ieee80211_rate *r = &sband->bitrates[i];
123
124 if (r->bitrate > txrate->bitrate)
125 break;
126
127 if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
128 rate = r->bitrate;
129
130 switch (sband->band) {
131 case IEEE80211_BAND_2GHZ: {
132 u32 flag;
133 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
134 flag = IEEE80211_RATE_MANDATORY_G;
135 else
136 flag = IEEE80211_RATE_MANDATORY_B;
137 if (r->flags & flag)
138 mrate = r->bitrate;
139 break;
140 }
141 case IEEE80211_BAND_5GHZ:
142 if (r->flags & IEEE80211_RATE_MANDATORY_A)
143 mrate = r->bitrate;
144 break;
145 case IEEE80211_NUM_BANDS:
146 WARN_ON(1);
147 break;
148 }
149 }
150 if (rate == -1) {
151 /* No matching basic rate found; use highest suitable mandatory
152 * PHY rate */
153 rate = mrate;
154 }
155
156 /* Time needed to transmit ACK
157 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
158 * to closest integer */
159
160 dur = ieee80211_frame_duration(local, 10, rate, erp,
161 tx->sdata->vif.bss_conf.use_short_preamble);
162
163 if (next_frag_len) {
164 /* Frame is fragmented: duration increases with time needed to
165 * transmit next fragment plus ACK and 2 x SIFS. */
166 dur *= 2; /* ACK + SIFS */
167 /* next fragment */
168 dur += ieee80211_frame_duration(local, next_frag_len,
169 txrate->bitrate, erp,
170 tx->sdata->vif.bss_conf.use_short_preamble);
171 }
172
173 return cpu_to_le16(dur);
174 }
175
176 static inline int is_ieee80211_device(struct ieee80211_local *local,
177 struct net_device *dev)
178 {
179 return local == wdev_priv(dev->ieee80211_ptr);
180 }
181
182 /* tx handlers */
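/*
 * Make sure the hardware is awake before transmitting: if dynamic
 * powersave is in use (and not offloaded to the hardware) and we are
 * currently in powersave, stop the queues and schedule the PS-disable
 * work; in any case (re)arm the dynamic PS timer.
 */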
183 static ieee80211_tx_result debug_noinline
184 ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
185 {
186 struct ieee80211_local *local = tx->local;
187 struct ieee80211_if_managed *ifmgd;
188
189 /* driver doesn't support power save */
190 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
191 return TX_CONTINUE;
192
193 /* hardware does dynamic power save */
194 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
195 return TX_CONTINUE;
196
197 /* dynamic power save disabled */
198 if (local->hw.conf.dynamic_ps_timeout <= 0)
199 return TX_CONTINUE;
200
201 /* we are scanning, don't enable power save */
202 if (local->scanning)
203 return TX_CONTINUE;
204
205 if (!local->ps_sdata)
206 return TX_CONTINUE;
207
208 /* No point if we're going to suspend */
209 if (local->quiescing)
210 return TX_CONTINUE;
211
212 /* dynamic ps is supported only in managed mode */
213 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
214 return TX_CONTINUE;
215
216 ifmgd = &tx->sdata->u.mgd;
217
218 /*
219 * Don't wake up from power save if u-apsd is enabled, voip ac has
220 * u-apsd enabled and the frame is in voip class. This effectively
221 * means that even if all access categories have u-apsd enabled, in
222 * practice u-apsd is only used with the voip ac. This is a
223 * workaround for the case when received voip class packets do not
224 * have a correct qos tag for some reason, due to the network or the
225 * peer application.
226 *
227 * Note: local->uapsd_queues access is racy here. If the value is
228 * changed via debugfs, user needs to reassociate manually to have
229 * everything in sync.
230 */
231 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
232 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
233 && skb_get_queue_mapping(tx->skb) == 0)
234 return TX_CONTINUE;
235
236 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
237 ieee80211_stop_queues_by_reason(&local->hw,
238 IEEE80211_QUEUE_STOP_REASON_PS);
239 ieee80211_queue_work(&local->hw,
240 &local->dynamic_ps_disable_work);
241 }
242
243 mod_timer(&local->dynamic_ps_timer, jiffies +
244 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
245
246 return TX_CONTINUE;
247 }
248
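/*
 * Sanity-check the association/scan state: while scanning off-channel
 * only probe requests and nullfunc frames may go out, unicast data to
 * stations we are not associated with is dropped, and multicast data
 * is dropped when there are no associated stations.
 */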
249 static ieee80211_tx_result debug_noinline
250 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
251 {
252
253 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
254 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
255 u32 sta_flags;
256
257 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
258 return TX_CONTINUE;
259
260 if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
261 !ieee80211_is_probe_req(hdr->frame_control) &&
262 !ieee80211_is_nullfunc(hdr->frame_control))
263 /*
264 * When software scanning, only nullfunc frames (to notify
265 * the sleep state to the AP) and probe requests (for the
266 * active scan) are allowed, all other frames should not be
267 * sent and we should not get here, but if we do
268 * nonetheless, drop them to avoid sending them
269 * off-channel. See the link below and
270 * ieee80211_start_scan() for more.
271 *
272 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
273 */
274 return TX_DROP;
275
276 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
277 return TX_CONTINUE;
278
279 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
280 return TX_CONTINUE;
281
282 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
283
284 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
285 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
286 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
287 ieee80211_is_data(hdr->frame_control))) {
288 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
289 printk(KERN_DEBUG "%s: dropped data frame to not "
290 "associated station %pM\n",
291 tx->sdata->name, hdr->addr1);
292 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
293 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
294 return TX_DROP;
295 }
296 } else {
297 if (unlikely(ieee80211_is_data(hdr->frame_control) &&
298 tx->local->num_sta == 0 &&
299 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
300 /*
301 * No associated STAs - no need to send multicast
302 * frames.
303 */
304 return TX_DROP;
305 }
306 return TX_CONTINUE;
307 }
308
309 return TX_CONTINUE;
310 }
311
312 /* This function is called whenever the AP is about to exceed the maximum limit
313 * of buffered frames for power saving STAs. This situation should not really
314 * happen often during normal operation, so dropping the oldest buffered packet
315 * from each queue should be OK to make some room for new frames. */
316 static void purge_old_ps_buffers(struct ieee80211_local *local)
317 {
318 int total = 0, purged = 0;
319 struct sk_buff *skb;
320 struct ieee80211_sub_if_data *sdata;
321 struct sta_info *sta;
322
323 /*
324 * virtual interfaces are protected by RCU
325 */
326 rcu_read_lock();
327
328 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
329 struct ieee80211_if_ap *ap;
330 if (sdata->vif.type != NL80211_IFTYPE_AP)
331 continue;
332 ap = &sdata->u.ap;
333 skb = skb_dequeue(&ap->ps_bc_buf);
334 if (skb) {
335 purged++;
336 dev_kfree_skb(skb);
337 }
338 total += skb_queue_len(&ap->ps_bc_buf);
339 }
340
341 list_for_each_entry_rcu(sta, &local->sta_list, list) {
342 skb = skb_dequeue(&sta->ps_tx_buf);
343 if (skb) {
344 purged++;
345 dev_kfree_skb(skb);
346 }
347 total += skb_queue_len(&sta->ps_tx_buf);
348 }
349
350 rcu_read_unlock();
351
352 local->total_ps_buffered = total;
353 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
354 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
355 wiphy_name(local->hw.wiphy), purged);
356 #endif
357 }
358
359 static ieee80211_tx_result
360 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
361 {
362 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
363 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
364
365 /*
366 * broadcast/multicast frame
367 *
368 * If any of the associated stations is in power save mode,
369 * the frame is buffered to be sent after DTIM beacon frame.
370 * This is done either by the hardware or us.
371 */
372
373 /* powersaving STAs only in AP/VLAN mode */
374 if (!tx->sdata->bss)
375 return TX_CONTINUE;
376
377 /* no buffering for ordered frames */
378 if (ieee80211_has_order(hdr->frame_control))
379 return TX_CONTINUE;
380
381 /* no stations in PS mode */
382 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
383 return TX_CONTINUE;
384
385 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
386
387 /* device releases frame after DTIM beacon */
388 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
389 return TX_CONTINUE;
390
391 /* buffered in mac80211 */
392 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
393 purge_old_ps_buffers(tx->local);
394
395 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
396 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
397 if (net_ratelimit())
398 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
399 tx->sdata->name);
400 #endif
401 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
402 } else
403 tx->local->total_ps_buffered++;
404
405 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
406
407 return TX_QUEUED;
408 }
409
410 static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
411 struct sk_buff *skb)
412 {
413 if (!ieee80211_is_mgmt(fc))
414 return 0;
415
416 if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
417 return 0;
418
419 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
420 skb->data))
421 return 0;
422
423 return 1;
424 }
425
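/*
 * Buffer unicast frames for stations that are in powersave mode
 * (unless the frame is a PS-Poll response): queue the frame on the
 * station's ps_tx_buf, set the TIM bit when appropriate and make sure
 * the STA cleanup timer is running.
 */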
426 static ieee80211_tx_result
427 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
428 {
429 struct sta_info *sta = tx->sta;
430 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
431 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
432 struct ieee80211_local *local = tx->local;
433 u32 staflags;
434
435 if (unlikely(!sta ||
436 ieee80211_is_probe_resp(hdr->frame_control) ||
437 ieee80211_is_auth(hdr->frame_control) ||
438 ieee80211_is_assoc_resp(hdr->frame_control) ||
439 ieee80211_is_reassoc_resp(hdr->frame_control)))
440 return TX_CONTINUE;
441
442 staflags = get_sta_flags(sta);
443
444 if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
445 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
446 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
447 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
448 "before %d)\n",
449 sta->sta.addr, sta->sta.aid,
450 skb_queue_len(&sta->ps_tx_buf));
451 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
452 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
453 purge_old_ps_buffers(tx->local);
454 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
455 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
456 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
457 if (net_ratelimit()) {
458 printk(KERN_DEBUG "%s: STA %pM TX "
459 "buffer full - dropping oldest frame\n",
460 tx->sdata->name, sta->sta.addr);
461 }
462 #endif
463 dev_kfree_skb(old);
464 } else
465 tx->local->total_ps_buffered++;
466
467 /*
468 * Queue frame to be sent after STA wakes up/polls,
469 * but don't set the TIM bit if the driver is blocking
470 * wakeup or poll response transmissions anyway.
471 */
472 if (skb_queue_empty(&sta->ps_tx_buf) &&
473 !(staflags & WLAN_STA_PS_DRIVER))
474 sta_info_set_tim_bit(sta);
475
476 info->control.jiffies = jiffies;
477 info->control.vif = &tx->sdata->vif;
478 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
479 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
480
481 if (!timer_pending(&local->sta_cleanup))
482 mod_timer(&local->sta_cleanup,
483 round_jiffies(jiffies +
484 STA_INFO_CLEANUP_INTERVAL));
485
486 return TX_QUEUED;
487 }
488 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
489 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
490 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
491 "set -> send frame\n", tx->sdata->name,
492 sta->sta.addr);
493 }
494 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
495
496 return TX_CONTINUE;
497 }
498
499 static ieee80211_tx_result debug_noinline
500 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
501 {
502 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
503 return TX_CONTINUE;
504
505 if (tx->flags & IEEE80211_TX_UNICAST)
506 return ieee80211_tx_h_unicast_ps_buf(tx);
507 else
508 return ieee80211_tx_h_multicast_ps_buf(tx);
509 }
510
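/*
 * Select the key used to protect this frame: the station's key if
 * present, the default management key for robust multicast management
 * frames, or the interface's default key; drop the frame if the
 * interface requires encryption but no suitable key exists.
 */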
511 static ieee80211_tx_result debug_noinline
512 ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
513 {
514 struct ieee80211_key *key = NULL;
515 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
516 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
517
518 if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
519 tx->key = NULL;
520 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
521 tx->key = key;
522 else if (ieee80211_is_mgmt(hdr->frame_control) &&
523 is_multicast_ether_addr(hdr->addr1) &&
524 ieee80211_is_robust_mgmt_frame(hdr) &&
525 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
526 tx->key = key;
527 else if ((key = rcu_dereference(tx->sdata->default_key)))
528 tx->key = key;
529 else if (tx->sdata->drop_unencrypted &&
530 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
531 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
532 (!ieee80211_is_robust_mgmt_frame(hdr) ||
533 (ieee80211_is_action(hdr->frame_control) &&
534 tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
535 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
536 return TX_DROP;
537 } else
538 tx->key = NULL;
539
540 if (tx->key) {
541 bool skip_hw = false;
542
543 tx->key->tx_rx_count++;
544 /* TODO: add threshold stuff again */
545
546 switch (tx->key->conf.alg) {
547 case ALG_WEP:
548 if (ieee80211_is_auth(hdr->frame_control))
549 break;
550 case ALG_TKIP:
551 if (!ieee80211_is_data_present(hdr->frame_control))
552 tx->key = NULL;
553 break;
554 case ALG_CCMP:
555 if (!ieee80211_is_data_present(hdr->frame_control) &&
556 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
557 tx->skb))
558 tx->key = NULL;
559 else
560 skip_hw = (tx->key->conf.flags &
561 IEEE80211_KEY_FLAG_SW_MGMT) &&
562 ieee80211_is_mgmt(hdr->frame_control);
563 break;
564 case ALG_AES_CMAC:
565 if (!ieee80211_is_mgmt(hdr->frame_control))
566 tx->key = NULL;
567 break;
568 }
569
570 if (!skip_hw && tx->key &&
571 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
572 info->control.hw_key = &tx->key->conf;
573 }
574
575 return TX_CONTINUE;
576 }
577
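/*
 * Let the driver know which station this frame is destined to, if the
 * station has been uploaded to the driver.
 */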
578 static ieee80211_tx_result debug_noinline
579 ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
580 {
581 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
582
583 if (tx->sta && tx->sta->uploaded)
584 info->control.sta = &tx->sta->sta;
585
586 return TX_CONTINUE;
587 }
588
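/*
 * Ask the rate control algorithm for the transmit rates and set up the
 * RTS/CTS, CTS-to-self protection and short preamble flags for each
 * selected rate.
 */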
589 static ieee80211_tx_result debug_noinline
590 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
591 {
592 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
593 struct ieee80211_hdr *hdr = (void *)tx->skb->data;
594 struct ieee80211_supported_band *sband;
595 struct ieee80211_rate *rate;
596 int i;
597 u32 len;
598 bool inval = false, rts = false, short_preamble = false;
599 struct ieee80211_tx_rate_control txrc;
600 u32 sta_flags;
601
602 memset(&txrc, 0, sizeof(txrc));
603
604 sband = tx->local->hw.wiphy->bands[tx->channel->band];
605
606 len = min_t(u32, tx->skb->len + FCS_LEN,
607 tx->local->hw.wiphy->frag_threshold);
608
609 /* set up the tx rate control struct we give the RC algo */
610 txrc.hw = local_to_hw(tx->local);
611 txrc.sband = sband;
612 txrc.bss_conf = &tx->sdata->vif.bss_conf;
613 txrc.skb = tx->skb;
614 txrc.reported_rate.idx = -1;
615 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
616 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
617 txrc.max_rate_idx = -1;
618 else
619 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
620 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
621
622 /* set up RTS protection if desired */
623 if (len > tx->local->hw.wiphy->rts_threshold) {
624 txrc.rts = rts = true;
625 }
626
627 /*
628 * Use short preamble if the BSS can handle it, but not for
629 * management frames unless we know the receiver can handle
630 * that -- the management frame might be to a station that
631 * just wants a probe response.
632 */
633 if (tx->sdata->vif.bss_conf.use_short_preamble &&
634 (ieee80211_is_data(hdr->frame_control) ||
635 (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
636 txrc.short_preamble = short_preamble = true;
637
638 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
639
640 /*
641 * Let's not bother with rate control if we're associated and cannot
642 * talk to the sta. This should not happen.
643 */
644 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
645 (sta_flags & WLAN_STA_ASSOC) &&
646 !rate_usable_index_exists(sband, &tx->sta->sta),
647 "%s: Dropped data frame as no usable bitrate found while "
648 "scanning and associated. Target station: "
649 "%pM on %d GHz band\n",
650 tx->sdata->name, hdr->addr1,
651 tx->channel->band ? 5 : 2))
652 return TX_DROP;
653
654 /*
655 * If we're associated with the sta at this point we know we can at
656 * least send the frame at the lowest bit rate.
657 */
658 rate_control_get_rate(tx->sdata, tx->sta, &txrc);
659
660 if (unlikely(info->control.rates[0].idx < 0))
661 return TX_DROP;
662
663 if (txrc.reported_rate.idx < 0)
664 txrc.reported_rate = info->control.rates[0];
665
666 if (tx->sta)
667 tx->sta->last_tx_rate = txrc.reported_rate;
668
669 if (unlikely(!info->control.rates[0].count))
670 info->control.rates[0].count = 1;
671
672 if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
673 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
674 info->control.rates[0].count = 1;
675
676 if (is_multicast_ether_addr(hdr->addr1)) {
677 /*
678 * XXX: verify the rate is in the basic rateset
679 */
680 return TX_CONTINUE;
681 }
682
683 /*
684 * set up the RTS/CTS rate as the fastest basic rate
685 * that is not faster than the data rate
686 *
687 * XXX: Should this check all retry rates?
688 */
689 if (!(info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
690 s8 baserate = 0;
691
692 rate = &sband->bitrates[info->control.rates[0].idx];
693
694 for (i = 0; i < sband->n_bitrates; i++) {
695 /* must be a basic rate */
696 if (!(tx->sdata->vif.bss_conf.basic_rates & BIT(i)))
697 continue;
698 /* must not be faster than the data rate */
699 if (sband->bitrates[i].bitrate > rate->bitrate)
700 continue;
701 /* maximum */
702 if (sband->bitrates[baserate].bitrate <
703 sband->bitrates[i].bitrate)
704 baserate = i;
705 }
706
707 info->control.rts_cts_rate_idx = baserate;
708 }
709
710 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
711 /*
712 * make sure there's no valid rate following
713 * an invalid one, just in case drivers don't
714 * take the API seriously to stop at -1.
715 */
716 if (inval) {
717 info->control.rates[i].idx = -1;
718 continue;
719 }
720 if (info->control.rates[i].idx < 0) {
721 inval = true;
722 continue;
723 }
724
725 /*
726 * For now assume MCS is already set up correctly, this
727 * needs to be fixed.
728 */
729 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) {
730 WARN_ON(info->control.rates[i].idx > 76);
731 continue;
732 }
733
734 /* set up RTS protection if desired */
735 if (rts)
736 info->control.rates[i].flags |=
737 IEEE80211_TX_RC_USE_RTS_CTS;
738
739 /* RC is busted */
740 if (WARN_ON_ONCE(info->control.rates[i].idx >=
741 sband->n_bitrates)) {
742 info->control.rates[i].idx = -1;
743 continue;
744 }
745
746 rate = &sband->bitrates[info->control.rates[i].idx];
747
748 /* set up short preamble */
749 if (short_preamble &&
750 rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
751 info->control.rates[i].flags |=
752 IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
753
754 /* set up G protection */
755 if (!rts && tx->sdata->vif.bss_conf.use_cts_prot &&
756 rate->flags & IEEE80211_RATE_ERP_G)
757 info->control.rates[i].flags |=
758 IEEE80211_TX_RC_USE_CTS_PROTECT;
759 }
760
761 return TX_CONTINUE;
762 }
763
764 static ieee80211_tx_result debug_noinline
765 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
766 {
767 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
769 u16 *seq;
770 u8 *qc;
771 int tid;
772
773 /*
774 * Packet injection may want to control the sequence
775 * number, if we have no matching interface then we
776 * neither assign one ourselves nor ask the driver to.
777 */
778 if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
779 return TX_CONTINUE;
780
781 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
782 return TX_CONTINUE;
783
784 if (ieee80211_hdrlen(hdr->frame_control) < 24)
785 return TX_CONTINUE;
786
787 /*
788 * Anything but QoS data that has a sequence number field
789 * (is long enough) gets a sequence number from the global
790 * counter.
791 */
792 if (!ieee80211_is_data_qos(hdr->frame_control)) {
793 /* driver should assign sequence number */
794 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
795 /* for pure STA mode without beacons, we can do it */
796 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
797 tx->sdata->sequence_number += 0x10;
798 return TX_CONTINUE;
799 }
800
801 /*
802 * This should be true for injected/management frames only, for
803 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
804 * above since they are not QoS-data frames.
805 */
806 if (!tx->sta)
807 return TX_CONTINUE;
808
809 /* include per-STA, per-TID sequence counter */
810
811 qc = ieee80211_get_qos_ctl(hdr);
812 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
813 seq = &tx->sta->tid_seq[tid];
814
815 hdr->seq_ctrl = cpu_to_le16(*seq);
816
817 /* Increase the sequence number. */
818 *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
819
820 return TX_CONTINUE;
821 }
822
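/*
 * Split the frame into fragments that (including FCS) fit within
 * frag_threshold, chaining the newly allocated fragments via
 * skb->next; the 802.11 header and control information are copied
 * into each fragment and the original skb becomes the first one.
 */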
823 static int ieee80211_fragment(struct ieee80211_local *local,
824 struct sk_buff *skb, int hdrlen,
825 int frag_threshold)
826 {
827 struct sk_buff *tail = skb, *tmp;
828 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
829 int pos = hdrlen + per_fragm;
830 int rem = skb->len - hdrlen - per_fragm;
831
832 if (WARN_ON(rem < 0))
833 return -EINVAL;
834
835 while (rem) {
836 int fraglen = per_fragm;
837
838 if (fraglen > rem)
839 fraglen = rem;
840 rem -= fraglen;
841 tmp = dev_alloc_skb(local->tx_headroom +
842 frag_threshold +
843 IEEE80211_ENCRYPT_HEADROOM +
844 IEEE80211_ENCRYPT_TAILROOM);
845 if (!tmp)
846 return -ENOMEM;
847 tail->next = tmp;
848 tail = tmp;
849 skb_reserve(tmp, local->tx_headroom +
850 IEEE80211_ENCRYPT_HEADROOM);
851 /* copy control information */
852 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
853 skb_copy_queue_mapping(tmp, skb);
854 tmp->priority = skb->priority;
855 tmp->dev = skb->dev;
856
857 /* copy header and data */
858 memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
859 memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
860
861 pos += fraglen;
862 }
863
864 skb->len = hdrlen + per_fragm;
865 return 0;
866 }
867
868 static ieee80211_tx_result debug_noinline
869 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
870 {
871 struct sk_buff *skb = tx->skb;
872 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
873 struct ieee80211_hdr *hdr = (void *)skb->data;
874 int frag_threshold = tx->local->hw.wiphy->frag_threshold;
875 int hdrlen;
876 int fragnum;
877
878 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
879 return TX_CONTINUE;
880
881 /*
882 * Warn when submitting a fragmented A-MPDU frame and drop it.
883 * This scenario is handled in ieee80211_tx_prepare but extra
884 * caution taken here as fragmented ampdu may cause Tx stop.
885 */
886 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
887 return TX_DROP;
888
889 hdrlen = ieee80211_hdrlen(hdr->frame_control);
890
891 /* internal error, why is TX_FRAGMENTED set? */
892 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
893 return TX_DROP;
894
895 /*
896 * Now fragment the frame. This will allocate all the fragments and
897 * chain them (using skb as the first fragment) to skb->next.
898 * During transmission, we will remove the successfully transmitted
899 * fragments from this list. When the low-level driver rejects one
900 * of the fragments then we will simply pretend to accept the skb
901 * but store it away as pending.
902 */
903 if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
904 return TX_DROP;
905
906 /* update duration/seq/flags of fragments */
907 fragnum = 0;
908 do {
909 int next_len;
910 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
911
912 hdr = (void *)skb->data;
913 info = IEEE80211_SKB_CB(skb);
914
915 if (skb->next) {
916 hdr->frame_control |= morefrags;
917 next_len = skb->next->len;
918 /*
919 * No multi-rate retries for fragmented frames, that
920 * would completely throw off the NAV at other STAs.
921 */
922 info->control.rates[1].idx = -1;
923 info->control.rates[2].idx = -1;
924 info->control.rates[3].idx = -1;
925 info->control.rates[4].idx = -1;
926 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
927 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
928 } else {
929 hdr->frame_control &= ~morefrags;
930 next_len = 0;
931 }
932 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
933 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
934 fragnum++;
935 } while ((skb = skb->next));
936
937 return TX_CONTINUE;
938 }
939
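/*
 * Update the per-station TX statistics: one packet, plus fragment and
 * byte counts for every skb in the fragment chain.
 */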
940 static ieee80211_tx_result debug_noinline
941 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
942 {
943 struct sk_buff *skb = tx->skb;
944
945 if (!tx->sta)
946 return TX_CONTINUE;
947
948 tx->sta->tx_packets++;
949 do {
950 tx->sta->tx_fragments++;
951 tx->sta->tx_bytes += skb->len;
952 } while ((skb = skb->next));
953
954 return TX_CONTINUE;
955 }
956
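/*
 * Encrypt the frame with the selected key, dispatching to the WEP,
 * TKIP, CCMP or AES-CMAC routine according to the key's algorithm.
 */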
957 static ieee80211_tx_result debug_noinline
958 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
959 {
960 if (!tx->key)
961 return TX_CONTINUE;
962
963 switch (tx->key->conf.alg) {
964 case ALG_WEP:
965 return ieee80211_crypto_wep_encrypt(tx);
966 case ALG_TKIP:
967 return ieee80211_crypto_tkip_encrypt(tx);
968 case ALG_CCMP:
969 return ieee80211_crypto_ccmp_encrypt(tx);
970 case ALG_AES_CMAC:
971 return ieee80211_crypto_aes_cmac_encrypt(tx);
972 }
973
974 /* not reached */
975 WARN_ON(1);
976 return TX_DROP;
977 }
978
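/*
 * Fill in the duration/ID field of every fragment, skipping PS-Poll
 * frames since their duration/ID field carries the AID.
 */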
979 static ieee80211_tx_result debug_noinline
980 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
981 {
982 struct sk_buff *skb = tx->skb;
983 struct ieee80211_hdr *hdr;
984 int next_len;
985 bool group_addr;
986
987 do {
988 hdr = (void *) skb->data;
989 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
990 break; /* must not overwrite AID */
991 next_len = skb->next ? skb->next->len : 0;
992 group_addr = is_multicast_ether_addr(hdr->addr1);
993
994 hdr->duration_id =
995 ieee80211_duration(tx, group_addr, next_len);
996 } while ((skb = skb->next));
997
998 return TX_CONTINUE;
999 }
1000
1001 /* actual transmit path */
1002
1003 /*
1004 * deal with packet injection down monitor interface
1005 * with Radiotap Header -- only called for monitor mode interface
1006 */
1007 static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1008 struct sk_buff *skb)
1009 {
1010 /*
1011 * this is the moment to interpret and discard the radiotap header that
1012 * must be at the start of the packet injected in Monitor mode
1013 *
1014 * Need to take some care with endian-ness since radiotap
1015 * args are little-endian
1016 */
1017
1018 struct ieee80211_radiotap_iterator iterator;
1019 struct ieee80211_radiotap_header *rthdr =
1020 (struct ieee80211_radiotap_header *) skb->data;
1021 struct ieee80211_supported_band *sband;
1022 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1023 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1024 NULL);
1025
1026 sband = tx->local->hw.wiphy->bands[tx->channel->band];
1027
1028 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1029 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1030
1031 /*
1032 * for every radiotap entry that is present
1033 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1034 * entries present, or -EINVAL on error)
1035 */
1036
1037 while (!ret) {
1038 ret = ieee80211_radiotap_iterator_next(&iterator);
1039
1040 if (ret)
1041 continue;
1042
1043 /* see if this argument is something we can use */
1044 switch (iterator.this_arg_index) {
1045 /*
1046 * You must take care when dereferencing iterator.this_arg
1047 * for multibyte types... the pointer is not aligned. Use
1048 * get_unaligned((type *)iterator.this_arg) to dereference
1049 * iterator.this_arg for type "type" safely on all arches.
1050 */
1051 case IEEE80211_RADIOTAP_FLAGS:
1052 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1053 /*
1054 * this indicates that the skb we have been
1055 * handed has the 32-bit FCS CRC at the end...
1056 * we should react to that by snipping it off
1057 * because it will be recomputed and added
1058 * on transmission
1059 */
1060 if (skb->len < (iterator._max_length + FCS_LEN))
1061 return false;
1062
1063 skb_trim(skb, skb->len - FCS_LEN);
1064 }
1065 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1066 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1067 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
1068 tx->flags |= IEEE80211_TX_FRAGMENTED;
1069 break;
1070
1071 /*
1072 * Please update the file
1073 * Documentation/networking/mac80211-injection.txt
1074 * when parsing new fields here.
1075 */
1076
1077 default:
1078 break;
1079 }
1080 }
1081
1082 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1083 return false;
1084
1085 /*
1086 * remove the radiotap header
1087 * iterator->_max_length was sanity-checked against
1088 * skb->len by iterator init
1089 */
1090 skb_pull(skb, iterator._max_length);
1091
1092 return true;
1093 }
1094
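/*
 * Decide whether a QoS data frame can go out as part of an A-MPDU.
 * If the aggregation session is operational the frame is marked for
 * A-MPDU TX; if the session is still being set up (or torn down),
 * re-check under the station lock and, if necessary, queue the frame
 * on the TID's pending queue. Returns true if the frame was queued.
 */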
1095 static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1096 struct sk_buff *skb,
1097 struct ieee80211_tx_info *info,
1098 struct tid_ampdu_tx *tid_tx,
1099 int tid)
1100 {
1101 bool queued = false;
1102
1103 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1104 info->flags |= IEEE80211_TX_CTL_AMPDU;
1105 } else {
1106 spin_lock(&tx->sta->lock);
1107 /*
1108 * Need to re-check now, because we may get here
1109 *
1110 * 1) in the window during which the setup is actually
1111 * already done, but not marked yet because not all
1112 * packets are spliced over to the driver pending
1113 * queue yet -- if this happened we acquire the lock
1114 * either before or after the splice happens, but
1115 * need to recheck which of these cases happened.
1116 *
1117 * 2) during session teardown, if the OPERATIONAL bit
1118 * was cleared due to the teardown but the pointer
1119 * hasn't been assigned NULL yet (or we loaded it
1120 * before it was assigned) -- in this case it may
1121 * now be NULL which means we should just let the
1122 * packet pass through because splicing the frames
1123 * back is already done.
1124 */
1125 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1126
1127 if (!tid_tx) {
1128 /* do nothing, let packet pass through */
1129 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1130 info->flags |= IEEE80211_TX_CTL_AMPDU;
1131 } else {
1132 queued = true;
1133 info->control.vif = &tx->sdata->vif;
1134 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1135 __skb_queue_tail(&tid_tx->pending, skb);
1136 }
1137 spin_unlock(&tx->sta->lock);
1138 }
1139
1140 return queued;
1141 }
1142
1143 /*
1144 * initialises @tx
1145 */
1146 static ieee80211_tx_result
1147 ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1148 struct ieee80211_tx_data *tx,
1149 struct sk_buff *skb)
1150 {
1151 struct ieee80211_local *local = sdata->local;
1152 struct ieee80211_hdr *hdr;
1153 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1154 int hdrlen, tid;
1155 u8 *qc;
1156
1157 memset(tx, 0, sizeof(*tx));
1158 tx->skb = skb;
1159 tx->local = local;
1160 tx->sdata = sdata;
1161 tx->channel = local->hw.conf.channel;
1162 /*
1163 * Set this flag (used below to indicate "automatic fragmentation"),
1164 * it will be cleared/left by radiotap as desired.
1165 */
1166 tx->flags |= IEEE80211_TX_FRAGMENTED;
1167
1168 /* process and remove the injection radiotap header */
1169 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1170 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1171 return TX_DROP;
1172
1173 /*
1174 * __ieee80211_parse_tx_radiotap has now removed
1175 * the radiotap header that was present and pre-filled
1176 * 'tx' with tx control information.
1177 */
1178 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1179 }
1180
1181 /*
1182 * If this flag is set to true anywhere, and we get here,
1183 * we are doing the needed processing, so remove the flag
1184 * now.
1185 */
1186 info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1187
1188 hdr = (struct ieee80211_hdr *) skb->data;
1189
1190 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1191 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1192 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1193 return TX_DROP;
1194 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1195 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1196 }
1197 if (!tx->sta)
1198 tx->sta = sta_info_get(sdata, hdr->addr1);
1199
1200 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1201 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
1202 struct tid_ampdu_tx *tid_tx;
1203
1204 qc = ieee80211_get_qos_ctl(hdr);
1205 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1206
1207 tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
1208 if (tid_tx) {
1209 bool queued;
1210
1211 queued = ieee80211_tx_prep_agg(tx, skb, info,
1212 tid_tx, tid);
1213
1214 if (unlikely(queued))
1215 return TX_QUEUED;
1216 }
1217 }
1218
1219 if (is_multicast_ether_addr(hdr->addr1)) {
1220 tx->flags &= ~IEEE80211_TX_UNICAST;
1221 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1222 } else {
1223 tx->flags |= IEEE80211_TX_UNICAST;
1224 if (unlikely(local->wifi_wme_noack_test))
1225 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1226 else
1227 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1228 }
1229
1230 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1231 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1232 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
1233 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1234 tx->flags |= IEEE80211_TX_FRAGMENTED;
1235 else
1236 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1237 }
1238
1239 if (!tx->sta)
1240 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1241 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1242 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1243
1244 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1245 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1246 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1247 tx->ethertype = (pos[0] << 8) | pos[1];
1248 }
1249 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1250
1251 return TX_CONTINUE;
1252 }
1253
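/*
 * Hand a (possibly fragmented) chain of frames to the driver. Returns
 * IEEE80211_TX_PENDING if the queue is stopped or frames are already
 * pending, and IEEE80211_TX_AGAIN if the driver refuses a frame; the
 * caller is then responsible for requeueing or retrying the rest.
 */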
1254 static int __ieee80211_tx(struct ieee80211_local *local,
1255 struct sk_buff **skbp,
1256 struct sta_info *sta,
1257 bool txpending)
1258 {
1259 struct sk_buff *skb = *skbp, *next;
1260 struct ieee80211_tx_info *info;
1261 struct ieee80211_sub_if_data *sdata;
1262 unsigned long flags;
1263 int ret, len;
1264 bool fragm = false;
1265
1266 while (skb) {
1267 int q = skb_get_queue_mapping(skb);
1268
1269 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1270 ret = IEEE80211_TX_OK;
1271 if (local->queue_stop_reasons[q] ||
1272 (!txpending && !skb_queue_empty(&local->pending[q])))
1273 ret = IEEE80211_TX_PENDING;
1274 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1275 if (ret != IEEE80211_TX_OK)
1276 return ret;
1277
1278 info = IEEE80211_SKB_CB(skb);
1279
1280 if (fragm)
1281 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
1282 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1283
1284 next = skb->next;
1285 len = skb->len;
1286
1287 if (next)
1288 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
1289
1290 sdata = vif_to_sdata(info->control.vif);
1291
1292 switch (sdata->vif.type) {
1293 case NL80211_IFTYPE_MONITOR:
1294 info->control.vif = NULL;
1295 break;
1296 case NL80211_IFTYPE_AP_VLAN:
1297 info->control.vif = &container_of(sdata->bss,
1298 struct ieee80211_sub_if_data, u.ap)->vif;
1299 break;
1300 default:
1301 /* keep */
1302 break;
1303 }
1304
1305 ret = drv_tx(local, skb);
1306 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1307 dev_kfree_skb(skb);
1308 ret = NETDEV_TX_OK;
1309 }
1310 if (ret != NETDEV_TX_OK) {
1311 info->control.vif = &sdata->vif;
1312 return IEEE80211_TX_AGAIN;
1313 }
1314
1315 *skbp = skb = next;
1316 ieee80211_led_tx(local, 1);
1317 fragm = true;
1318 }
1319
1320 return IEEE80211_TX_OK;
1321 }
1322
1323 /*
1324 * Invoke TX handlers, return 0 on success and non-zero if the
1325 * frame was dropped or queued.
1326 */
1327 static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1328 {
1329 struct sk_buff *skb = tx->skb;
1330 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1331 ieee80211_tx_result res = TX_DROP;
1332
1333 #define CALL_TXH(txh) \
1334 do { \
1335 res = txh(tx); \
1336 if (res != TX_CONTINUE) \
1337 goto txh_done; \
1338 } while (0)
1339
1340 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1341 CALL_TXH(ieee80211_tx_h_check_assoc);
1342 CALL_TXH(ieee80211_tx_h_ps_buf);
1343 CALL_TXH(ieee80211_tx_h_select_key);
1344 CALL_TXH(ieee80211_tx_h_sta);
1345 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1346 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1347
1348 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
1349 goto txh_done;
1350
1351 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1352 CALL_TXH(ieee80211_tx_h_sequence);
1353 CALL_TXH(ieee80211_tx_h_fragment);
1354 /* handlers after fragment must be aware of tx info fragmentation! */
1355 CALL_TXH(ieee80211_tx_h_stats);
1356 CALL_TXH(ieee80211_tx_h_encrypt);
1357 CALL_TXH(ieee80211_tx_h_calculate_duration);
1358 #undef CALL_TXH
1359
1360 txh_done:
1361 if (unlikely(res == TX_DROP)) {
1362 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1363 while (skb) {
1364 struct sk_buff *next;
1365
1366 next = skb->next;
1367 dev_kfree_skb(skb);
1368 skb = next;
1369 }
1370 return -1;
1371 } else if (unlikely(res == TX_QUEUED)) {
1372 I802_DEBUG_INC(tx->local->tx_handlers_queued);
1373 return -1;
1374 }
1375
1376 return 0;
1377 }
1378
1379 static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1380 struct sk_buff *skb, bool txpending)
1381 {
1382 struct ieee80211_local *local = sdata->local;
1383 struct ieee80211_tx_data tx;
1384 ieee80211_tx_result res_prepare;
1385 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1386 struct sk_buff *next;
1387 unsigned long flags;
1388 int ret, retries;
1389 u16 queue;
1390
1391 queue = skb_get_queue_mapping(skb);
1392
1393 if (unlikely(skb->len < 10)) {
1394 dev_kfree_skb(skb);
1395 return;
1396 }
1397
1398 rcu_read_lock();
1399
1400 /* initialises tx */
1401 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
1402
1403 if (unlikely(res_prepare == TX_DROP)) {
1404 dev_kfree_skb(skb);
1405 rcu_read_unlock();
1406 return;
1407 } else if (unlikely(res_prepare == TX_QUEUED)) {
1408 rcu_read_unlock();
1409 return;
1410 }
1411
1412 tx.channel = local->hw.conf.channel;
1413 info->band = tx.channel->band;
1414
1415 if (invoke_tx_handlers(&tx))
1416 goto out;
1417
1418 retries = 0;
1419 retry:
1420 ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
1421 switch (ret) {
1422 case IEEE80211_TX_OK:
1423 break;
1424 case IEEE80211_TX_AGAIN:
1425 /*
1426 * Since there are no fragmented frames on A-MPDU
1427 * queues, there's no reason for a driver to reject
1428 * a frame there, warn and drop it.
1429 */
1430 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
1431 goto drop;
1432 /* fall through */
1433 case IEEE80211_TX_PENDING:
1434 skb = tx.skb;
1435
1436 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1437
1438 if (local->queue_stop_reasons[queue] ||
1439 !skb_queue_empty(&local->pending[queue])) {
1440 /*
1441 * if queue is stopped, queue up frames for later
1442 * transmission from the tasklet
1443 */
1444 do {
1445 next = skb->next;
1446 skb->next = NULL;
1447 if (unlikely(txpending))
1448 __skb_queue_head(&local->pending[queue],
1449 skb);
1450 else
1451 __skb_queue_tail(&local->pending[queue],
1452 skb);
1453 } while ((skb = next));
1454
1455 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1456 flags);
1457 } else {
1458 /*
1459 * otherwise retry, but this is a race condition or
1460 * a driver bug (which we warn about if it persists)
1461 */
1462 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1463 flags);
1464
1465 retries++;
1466 if (WARN(retries > 10, "tx refused but queue active\n"))
1467 goto drop;
1468 goto retry;
1469 }
1470 }
1471 out:
1472 rcu_read_unlock();
1473 return;
1474
1475 drop:
1476 rcu_read_unlock();
1477
1478 skb = tx.skb;
1479 while (skb) {
1480 next = skb->next;
1481 dev_kfree_skb(skb);
1482 skb = next;
1483 }
1484 }
1485
1486 /* device xmit handlers */
1487
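/*
 * Make sure the skb has enough headroom (and, if it may be encrypted,
 * tailroom) for the 802.11 and crypto headers, reallocating the skb
 * header if necessary.
 */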
1488 static int ieee80211_skb_resize(struct ieee80211_local *local,
1489 struct sk_buff *skb,
1490 int head_need, bool may_encrypt)
1491 {
1492 int tail_need = 0;
1493
1494 /*
1495 * This could be optimised, devices that do full hardware
1496 * crypto (including TKIP MMIC) need no tailroom... But we
1497 * have no drivers for such devices currently.
1498 */
1499 if (may_encrypt) {
1500 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1501 tail_need -= skb_tailroom(skb);
1502 tail_need = max_t(int, tail_need, 0);
1503 }
1504
1505 if (head_need || tail_need) {
1506 /* Sorry. Can't account for this any more */
1507 skb_orphan(skb);
1508 }
1509
1510 if (skb_header_cloned(skb))
1511 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1512 else
1513 I802_DEBUG_INC(local->tx_expand_skb_head);
1514
1515 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1516 printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
1517 wiphy_name(local->hw.wiphy));
1518 return -ENOMEM;
1519 }
1520
1521 /* update truesize too */
1522 skb->truesize += head_need + tail_need;
1523
1524 return 0;
1525 }
1526
1527 static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1528 struct sk_buff *skb)
1529 {
1530 struct ieee80211_local *local = sdata->local;
1531 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1532 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1533 struct ieee80211_sub_if_data *tmp_sdata;
1534 int headroom;
1535 bool may_encrypt;
1536
1537 rcu_read_lock();
1538
1539 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1540 int hdrlen;
1541 u16 len_rthdr;
1542
1543 info->flags |= IEEE80211_TX_CTL_INJECTED |
1544 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1545
1546 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1547 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1548 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1549
1550 /* check the header is complete in the frame */
1551 if (likely(skb->len >= len_rthdr + hdrlen)) {
1552 /*
1553 * We process outgoing injected frames that have a
1554 * local address we handle as though they are our
1555 * own frames.
1556 * This code here isn't entirely correct, the local
1557 * MAC address is not necessarily enough to find
1558 * the interface to use; for that proper VLAN/WDS
1559 * support we will need a different mechanism.
1560 */
1561
1562 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1563 list) {
1564 if (!ieee80211_sdata_running(tmp_sdata))
1565 continue;
1566 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1567 continue;
1568 if (compare_ether_addr(tmp_sdata->vif.addr,
1569 hdr->addr2) == 0) {
1570 sdata = tmp_sdata;
1571 break;
1572 }
1573 }
1574 }
1575 }
1576
1577 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1578
1579 headroom = local->tx_headroom;
1580 if (may_encrypt)
1581 headroom += IEEE80211_ENCRYPT_HEADROOM;
1582 headroom -= skb_headroom(skb);
1583 headroom = max_t(int, 0, headroom);
1584
1585 if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
1586 dev_kfree_skb(skb);
1587 rcu_read_unlock();
1588 return;
1589 }
1590
1591 info->control.vif = &sdata->vif;
1592
1593 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1594 ieee80211_is_data(hdr->frame_control) &&
1595 !is_multicast_ether_addr(hdr->addr1))
1596 if (mesh_nexthop_lookup(skb, sdata)) {
1597 /* skb queued: don't free */
1598 rcu_read_unlock();
1599 return;
1600 }
1601
1602 ieee80211_set_qos_hdr(local, skb);
1603 ieee80211_tx(sdata, skb, false);
1604 rcu_read_unlock();
1605 }
1606
1607 netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1608 struct net_device *dev)
1609 {
1610 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1611 struct ieee80211_channel *chan = local->hw.conf.channel;
1612 struct ieee80211_radiotap_header *prthdr =
1613 (struct ieee80211_radiotap_header *)skb->data;
1614 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1615 u16 len_rthdr;
1616
1617 /*
1618 * Frame injection is not allowed if beaconing is not allowed
1619 * or if we need radar detection. Beaconing is usually not allowed when
1620 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1621 * Passive scan is also used in world regulatory domains where
1622 * your country is not known and as such it should be treated as
1623 * NO TX unless the channel is explicitly allowed in which case
1624 * your current regulatory domain would not have the passive scan
1625 * flag.
1626 *
1627 * Since AP mode uses monitor interfaces to inject/TX management
1628 * frames we can make AP mode the exception to this rule once it
1629 * supports radar detection as its implementation can deal with
1630 * radar detection by itself. We can do that later by adding a
1631 * monitor flag interfaces used for AP support.
1632 */
1633 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1634 IEEE80211_CHAN_PASSIVE_SCAN)))
1635 goto fail;
1636
1637 /* check for not even having the fixed radiotap header part */
1638 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1639 goto fail; /* too short to be possibly valid */
1640
1641 /* is it a header version we can trust to find length from? */
1642 if (unlikely(prthdr->it_version))
1643 goto fail; /* only version 0 is supported */
1644
1645 /* then there must be a radiotap header with a length we can use */
1646 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1647
1648 /* does the skb contain enough to deliver on the alleged length? */
1649 if (unlikely(skb->len < len_rthdr))
1650 goto fail; /* skb too short for claimed rt header extent */
1651
1652 /*
1653 * fix up the pointers accounting for the radiotap
1654 * header still being in there. We are being given
1655 * a precooked IEEE80211 header so no need for
1656 * normal processing
1657 */
1658 skb_set_mac_header(skb, len_rthdr);
1659 /*
1660 * these are just fixed to the end of the rt area since we
1661 * don't have any better information and at this point, nobody cares
1662 */
1663 skb_set_network_header(skb, len_rthdr);
1664 skb_set_transport_header(skb, len_rthdr);
1665
1666 memset(info, 0, sizeof(*info));
1667
1668 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1669
1670 /* pass the radiotap header up to xmit */
1671 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1672 return NETDEV_TX_OK;
1673
1674 fail:
1675 dev_kfree_skb(skb);
1676 return NETDEV_TX_OK; /* meaning, we dealt with the skb */
1677 }
1678
1679 /**
1680 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
1681 * subinterfaces (wlan#, WDS, and VLAN interfaces)
1682 * @skb: packet to be sent
1683 * @dev: incoming interface
1684 *
1685 * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will
1686 * not be freed, and caller is responsible for either retrying later or freeing
1687 * skb).
1688 *
1689 * This function takes in an Ethernet header and encapsulates it with suitable
1690 * IEEE 802.11 header based on which interface the packet is coming in. The
1691 * encapsulated packet will then be passed to master interface, wlan#.11, for
1692 * transmission (through low-level driver).
1693 */
1694 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1695 struct net_device *dev)
1696 {
1697 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1698 struct ieee80211_local *local = sdata->local;
1699 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1700 int ret = NETDEV_TX_BUSY, head_need;
1701 u16 ethertype, hdrlen, meshhdrlen = 0;
1702 __le16 fc;
1703 struct ieee80211_hdr hdr;
1704 struct ieee80211s_hdr mesh_hdr;
1705 const u8 *encaps_data;
1706 int encaps_len, skip_header_bytes;
1707 int nh_pos, h_pos;
1708 struct sta_info *sta = NULL;
1709 u32 sta_flags = 0;
1710
1711 if (unlikely(skb->len < ETH_HLEN)) {
1712 ret = NETDEV_TX_OK;
1713 goto fail;
1714 }
1715
1716 nh_pos = skb_network_header(skb) - skb->data;
1717 h_pos = skb_transport_header(skb) - skb->data;
1718
1719 /* convert Ethernet header to proper 802.11 header (based on
1720 * operation mode) */
1721 ethertype = (skb->data[12] << 8) | skb->data[13];
1722 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1723
1724 switch (sdata->vif.type) {
1725 case NL80211_IFTYPE_AP_VLAN:
1726 rcu_read_lock();
1727 sta = rcu_dereference(sdata->u.vlan.sta);
1728 if (sta) {
1729 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1730 /* RA TA DA SA */
1731 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1732 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1733 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1734 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1735 hdrlen = 30;
1736 sta_flags = get_sta_flags(sta);
1737 }
1738 rcu_read_unlock();
1739 if (sta)
1740 break;
1741 /* fall through */
1742 case NL80211_IFTYPE_AP:
1743 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1744 /* DA BSSID SA */
1745 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1746 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1747 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1748 hdrlen = 24;
1749 break;
1750 case NL80211_IFTYPE_WDS:
1751 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1752 /* RA TA DA SA */
1753 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1754 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1755 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1756 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1757 hdrlen = 30;
1758 break;
1759 #ifdef CONFIG_MAC80211_MESH
1760 case NL80211_IFTYPE_MESH_POINT:
1761 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1762 /* Do not send frames with mesh_ttl == 0 */
1763 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1764 ret = NETDEV_TX_OK;
1765 goto fail;
1766 }
1767
1768 if (compare_ether_addr(sdata->vif.addr,
1769 skb->data + ETH_ALEN) == 0) {
1770 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1771 skb->data, skb->data + ETH_ALEN);
1772 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1773 sdata, NULL, NULL, NULL);
1774 } else {
1775 /* packet from other interface */
1776 struct mesh_path *mppath;
1777 int is_mesh_mcast = 1;
1778 const u8 *mesh_da;
1779
1780 rcu_read_lock();
1781 if (is_multicast_ether_addr(skb->data))
1782 /* DA TA mSA AE:SA */
1783 mesh_da = skb->data;
1784 else {
1785 static const u8 bcast[ETH_ALEN] =
1786 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1787
1788 mppath = mpp_path_lookup(skb->data, sdata);
1789 if (mppath) {
1790 /* RA TA mDA mSA AE:DA SA */
1791 mesh_da = mppath->mpp;
1792 is_mesh_mcast = 0;
1793 } else {
1794 /* DA TA mSA AE:SA */
1795 mesh_da = bcast;
1796 }
1797 }
1798 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1799 mesh_da, sdata->vif.addr);
1800 rcu_read_unlock();
1801 if (is_mesh_mcast)
1802 meshhdrlen =
1803 ieee80211_new_mesh_header(&mesh_hdr,
1804 sdata,
1805 skb->data + ETH_ALEN,
1806 NULL,
1807 NULL);
1808 else
1809 meshhdrlen =
1810 ieee80211_new_mesh_header(&mesh_hdr,
1811 sdata,
1812 NULL,
1813 skb->data,
1814 skb->data + ETH_ALEN);
1815
1816 }
1817 break;
1818 #endif
1819 case NL80211_IFTYPE_STATION:
1820 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1821 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1822 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1823 /* RA TA DA SA */
1824 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1825 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1826 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1827 hdrlen = 30;
1828 } else {
1829 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1830 /* BSSID SA DA */
1831 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1832 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1833 hdrlen = 24;
1834 }
1835 break;
1836 case NL80211_IFTYPE_ADHOC:
1837 /* DA SA BSSID */
1838 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1839 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1840 memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
1841 hdrlen = 24;
1842 break;
1843 default:
1844 ret = NETDEV_TX_OK;
1845 goto fail;
1846 }
1847
1848 /*
1849 * There's no need to try to look up the destination
1850 * if it is a multicast address (which can only happen
1851 * in AP mode)
1852 */
1853 if (!is_multicast_ether_addr(hdr.addr1)) {
1854 rcu_read_lock();
1855 sta = sta_info_get(sdata, hdr.addr1);
1856 if (sta)
1857 sta_flags = get_sta_flags(sta);
1858 rcu_read_unlock();
1859 }
1860
1861 	/* if both the receiver and we are QoS enabled, use a QoS data frame */
1862 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
1863 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1864 hdrlen += 2;
1865 }
1866
1867 /*
1868 * Drop unicast frames to unauthorised stations unless they are
1869 * EAPOL frames from the local station.
1870 */
1871 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1872 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1873 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1874 !(ethertype == ETH_P_PAE &&
1875 compare_ether_addr(sdata->vif.addr,
1876 skb->data + ETH_ALEN) == 0))) {
1877 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1878 if (net_ratelimit())
1879 printk(KERN_DEBUG "%s: dropped frame to %pM"
1880 " (unauthorized port)\n", dev->name,
1881 hdr.addr1);
1882 #endif
1883
1884 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1885
1886 ret = NETDEV_TX_OK;
1887 goto fail;
1888 }
1889
1890 hdr.frame_control = fc;
1891 hdr.duration_id = 0;
1892 hdr.seq_ctrl = 0;
1893
1894 skip_header_bytes = ETH_HLEN;
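	/*
	 * LLC/SNAP encapsulation: AppleTalk ARP and IPX use the bridge-tunnel
	 * header, other EtherType protocols (>= 0x600) use the RFC 1042 header.
	 * The original EtherType stays in the frame as part of the SNAP header,
	 * so only the two MAC addresses (ETH_HLEN - 2 bytes) are stripped below.
	 */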
1895 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
1896 encaps_data = bridge_tunnel_header;
1897 encaps_len = sizeof(bridge_tunnel_header);
1898 skip_header_bytes -= 2;
1899 } else if (ethertype >= 0x600) {
1900 encaps_data = rfc1042_header;
1901 encaps_len = sizeof(rfc1042_header);
1902 skip_header_bytes -= 2;
1903 } else {
1904 encaps_data = NULL;
1905 encaps_len = 0;
1906 }
1907
1908 skb_pull(skb, skip_header_bytes);
1909 nh_pos -= skip_header_bytes;
1910 h_pos -= skip_header_bytes;
1911
1912 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1913
1914 	/*
1915 	 * We need to modify the skb header and therefore need headroom in a
1916 	 * writable copy of it. The head_need value computed above does not
1917 	 * yet include the extra header space that is only needed later in
1918 	 * the TX path (for example for encryption or by the driver).
1919 	 *
1920 	 * If we have to reallocate anyway, either because there is not
1921 	 * enough headroom or because the skb is cloned, make the buffer
1922 	 * big enough for everything we may ever need, so the reallocation
1923 	 * is done at most once per frame.
1924 	 */
1925
1926 if (head_need > 0 || skb_cloned(skb)) {
1927 head_need += IEEE80211_ENCRYPT_HEADROOM;
1928 head_need += local->tx_headroom;
1929 head_need = max_t(int, 0, head_need);
1930 if (ieee80211_skb_resize(local, skb, head_need, true))
1931 goto fail;
1932 }
1933
1934 if (encaps_data) {
1935 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
1936 nh_pos += encaps_len;
1937 h_pos += encaps_len;
1938 }
1939
1940 if (meshhdrlen > 0) {
1941 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
1942 nh_pos += meshhdrlen;
1943 h_pos += meshhdrlen;
1944 }
1945
1946 if (ieee80211_is_data_qos(fc)) {
1947 __le16 *qos_control;
1948
1949 qos_control = (__le16*) skb_push(skb, 2);
1950 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
1951 /*
1952 		 * Maybe we could actually set some fields here; for now just
1953 		 * initialise to zero to indicate no special operation.
1954 */
1955 *qos_control = 0;
1956 } else
1957 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
1958
1959 nh_pos += hdrlen;
1960 h_pos += hdrlen;
1961
1962 dev->stats.tx_packets++;
1963 dev->stats.tx_bytes += skb->len;
1964
1965 /* Update skb pointers to various headers since this modified frame
1966 * is going to go through Linux networking code that may potentially
1967 * need things like pointer to IP header. */
1968 skb_set_mac_header(skb, 0);
1969 skb_set_network_header(skb, nh_pos);
1970 skb_set_transport_header(skb, h_pos);
1971
1972 memset(info, 0, sizeof(*info));
1973
1974 dev->trans_start = jiffies;
1975 ieee80211_xmit(sdata, skb);
1976
1977 return NETDEV_TX_OK;
1978
1979 fail:
1980 if (ret == NETDEV_TX_OK)
1981 dev_kfree_skb(skb);
1982
1983 return ret;
1984 }
1985
1986
1987 /*
1988 * ieee80211_clear_tx_pending may not be called in a context where
1989  * it is possible that packets could come in again.
1990 */
1991 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1992 {
1993 int i;
1994
1995 for (i = 0; i < local->hw.queues; i++)
1996 skb_queue_purge(&local->pending[i]);
1997 }
1998
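/*
 * Transmit one frame from the pending queue; returns false if the frame
 * could not be sent now and should be requeued by the caller.
 */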
1999 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2000 struct sk_buff *skb)
2001 {
2002 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2003 struct ieee80211_sub_if_data *sdata;
2004 struct sta_info *sta;
2005 struct ieee80211_hdr *hdr;
2006 int ret;
2007 bool result = true;
2008
2009 sdata = vif_to_sdata(info->control.vif);
2010
2011 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2012 ieee80211_tx(sdata, skb, true);
2013 } else {
2014 hdr = (struct ieee80211_hdr *)skb->data;
2015 sta = sta_info_get(sdata, hdr->addr1);
2016
2017 ret = __ieee80211_tx(local, &skb, sta, true);
2018 if (ret != IEEE80211_TX_OK)
2019 result = false;
2020 }
2021
2022 return result;
2023 }
2024
2025 /*
2026 * Transmit all pending packets. Called from tasklet.
2027 */
2028 void ieee80211_tx_pending(unsigned long data)
2029 {
2030 struct ieee80211_local *local = (struct ieee80211_local *)data;
2031 struct ieee80211_sub_if_data *sdata;
2032 unsigned long flags;
2033 int i;
2034 bool txok;
2035
2036 rcu_read_lock();
2037
2038 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
2039 for (i = 0; i < local->hw.queues; i++) {
2040 /*
2041 		 * If the queue is stopped for some reason other than pending
2042 		 * frames, or there are no pending frames, proceed to the next queue.
2043 */
2044 if (local->queue_stop_reasons[i] ||
2045 skb_queue_empty(&local->pending[i]))
2046 continue;
2047
2048 while (!skb_queue_empty(&local->pending[i])) {
2049 struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
2050 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2051
2052 if (WARN_ON(!info->control.vif)) {
2053 kfree_skb(skb);
2054 continue;
2055 }
2056
2057 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
2058 flags);
2059
2060 txok = ieee80211_tx_pending_skb(local, skb);
2061 if (!txok)
2062 __skb_queue_head(&local->pending[i], skb);
2063 spin_lock_irqsave(&local->queue_stop_reason_lock,
2064 flags);
2065 if (!txok)
2066 break;
2067 }
2068
2069 if (skb_queue_empty(&local->pending[i]))
2070 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2071 netif_tx_wake_queue(
2072 netdev_get_tx_queue(sdata->dev, i));
2073 }
2074 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2075
2076 rcu_read_unlock();
2077 }
2078
2079 /* functions for drivers to get certain frames */
2080
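/*
 * Build the TIM element into the beacon: DTIM count and period, the
 * bitmap control octet (including the AID 0 multicast bit) and the
 * partial virtual bitmap for stations currently in power save mode.
 */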
2081 static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2082 struct sk_buff *skb,
2083 struct beacon_data *beacon)
2084 {
2085 u8 *pos, *tim;
2086 int aid0 = 0;
2087 int i, have_bits = 0, n1, n2;
2088
2089 /* Generate bitmap for TIM only if there are any STAs in power save
2090 * mode. */
2091 if (atomic_read(&bss->num_sta_ps) > 0)
2092 /* in the hope that this is faster than
2093 * checking byte-for-byte */
2094 have_bits = !bitmap_empty((unsigned long*)bss->tim,
2095 IEEE80211_MAX_AID+1);
2096
2097 if (bss->dtim_count == 0)
2098 bss->dtim_count = beacon->dtim_period - 1;
2099 else
2100 bss->dtim_count--;
2101
2102 tim = pos = (u8 *) skb_put(skb, 6);
2103 *pos++ = WLAN_EID_TIM;
2104 *pos++ = 4;
2105 *pos++ = bss->dtim_count;
2106 *pos++ = beacon->dtim_period;
2107
2108 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
2109 aid0 = 1;
2110
2111 if (have_bits) {
2112 /* Find largest even number N1 so that bits numbered 1 through
2113 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
2114 * (N2 + 1) x 8 through 2007 are 0. */
2115 n1 = 0;
2116 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
2117 if (bss->tim[i]) {
2118 n1 = i & 0xfe;
2119 break;
2120 }
2121 }
2122 n2 = n1;
2123 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
2124 if (bss->tim[i]) {
2125 n2 = i;
2126 break;
2127 }
2128 }
2129
2130 /* Bitmap control */
2131 *pos++ = n1 | aid0;
2132 /* Part Virt Bitmap */
2133 memcpy(pos, bss->tim + n1, n2 - n1 + 1);
2134
2135 tim[1] = n2 - n1 + 4;
2136 skb_put(skb, n2 - n1);
2137 } else {
2138 *pos++ = aid0; /* Bitmap control */
2139 *pos++ = 0; /* Part Virt Bitmap */
2140 }
2141 }
2142
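/*
 * Build a complete beacon for the given interface: AP interfaces get
 * head, TIM and tail assembled here, IBSS reuses the probe response
 * template, and mesh interfaces construct the frame from scratch.
 */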
2143 struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2144 struct ieee80211_vif *vif,
2145 u16 *tim_offset, u16 *tim_length)
2146 {
2147 struct ieee80211_local *local = hw_to_local(hw);
2148 struct sk_buff *skb = NULL;
2149 struct ieee80211_tx_info *info;
2150 struct ieee80211_sub_if_data *sdata = NULL;
2151 struct ieee80211_if_ap *ap = NULL;
2152 struct beacon_data *beacon;
2153 struct ieee80211_supported_band *sband;
2154 enum ieee80211_band band = local->hw.conf.channel->band;
2155 struct ieee80211_tx_rate_control txrc;
2156
2157 sband = local->hw.wiphy->bands[band];
2158
2159 rcu_read_lock();
2160
2161 sdata = vif_to_sdata(vif);
2162
2163 if (tim_offset)
2164 *tim_offset = 0;
2165 if (tim_length)
2166 *tim_length = 0;
2167
2168 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2169 ap = &sdata->u.ap;
2170 beacon = rcu_dereference(ap->beacon);
2171 if (ap && beacon) {
2172 /*
2173 * headroom, head length,
2174 * tail length and maximum TIM length
2175 */
2176 skb = dev_alloc_skb(local->tx_headroom +
2177 beacon->head_len +
2178 beacon->tail_len + 256);
2179 if (!skb)
2180 goto out;
2181
2182 skb_reserve(skb, local->tx_headroom);
2183 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2184 beacon->head_len);
2185
2186 /*
2187 * Not very nice, but we want to allow the driver to call
2188 * ieee80211_beacon_get() as a response to the set_tim()
2189 * callback. That, however, is already invoked under the
2190 * sta_lock to guarantee consistent and race-free update
2191 * of the tim bitmap in mac80211 and the driver.
2192 */
2193 if (local->tim_in_locked_section) {
2194 ieee80211_beacon_add_tim(ap, skb, beacon);
2195 } else {
2196 unsigned long flags;
2197
2198 spin_lock_irqsave(&local->sta_lock, flags);
2199 ieee80211_beacon_add_tim(ap, skb, beacon);
2200 spin_unlock_irqrestore(&local->sta_lock, flags);
2201 }
2202
2203 if (tim_offset)
2204 *tim_offset = beacon->head_len;
2205 if (tim_length)
2206 *tim_length = skb->len - beacon->head_len;
2207
2208 if (beacon->tail)
2209 memcpy(skb_put(skb, beacon->tail_len),
2210 beacon->tail, beacon->tail_len);
2211 } else
2212 goto out;
2213 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2214 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2215 struct ieee80211_hdr *hdr;
2216 struct sk_buff *presp = rcu_dereference(ifibss->presp);
2217
2218 if (!presp)
2219 goto out;
2220
2221 skb = skb_copy(presp, GFP_ATOMIC);
2222 if (!skb)
2223 goto out;
2224
2225 hdr = (struct ieee80211_hdr *) skb->data;
2226 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2227 IEEE80211_STYPE_BEACON);
2228 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2229 struct ieee80211_mgmt *mgmt;
2230 u8 *pos;
2231
2232 		/* headroom plus enough space for the mesh beacon frame and IEs */
2233 skb = dev_alloc_skb(local->tx_headroom + 400);
2234 if (!skb)
2235 goto out;
2236
2237 skb_reserve(skb, local->hw.extra_tx_headroom);
2238 mgmt = (struct ieee80211_mgmt *)
2239 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2240 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2241 mgmt->frame_control =
2242 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2243 memset(mgmt->da, 0xff, ETH_ALEN);
2244 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2245 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2246 mgmt->u.beacon.beacon_int =
2247 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2248 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
2249
2250 pos = skb_put(skb, 2);
2251 *pos++ = WLAN_EID_SSID;
2252 *pos++ = 0x0;
2253
2254 mesh_mgmt_ies_add(skb, sdata);
2255 } else {
2256 WARN_ON(1);
2257 goto out;
2258 }
2259
2260 info = IEEE80211_SKB_CB(skb);
2261
2262 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2263 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2264 info->band = band;
2265
2266 memset(&txrc, 0, sizeof(txrc));
2267 txrc.hw = hw;
2268 txrc.sband = sband;
2269 txrc.bss_conf = &sdata->vif.bss_conf;
2270 txrc.skb = skb;
2271 txrc.reported_rate.idx = -1;
2272 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2273 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2274 txrc.max_rate_idx = -1;
2275 else
2276 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2277 txrc.ap = true;
2278 rate_control_get_rate(sdata, NULL, &txrc);
2279
2280 info->control.vif = vif;
2281
2282 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
2283 IEEE80211_TX_CTL_ASSIGN_SEQ |
2284 IEEE80211_TX_CTL_FIRST_FRAGMENT;
2285 out:
2286 rcu_read_unlock();
2287 return skb;
2288 }
2289 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2290
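/*
 * Build a PS-Poll template for the interface's current association;
 * only valid for station interfaces.
 */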
2291 struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2292 struct ieee80211_vif *vif)
2293 {
2294 struct ieee80211_sub_if_data *sdata;
2295 struct ieee80211_if_managed *ifmgd;
2296 struct ieee80211_pspoll *pspoll;
2297 struct ieee80211_local *local;
2298 struct sk_buff *skb;
2299
2300 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2301 return NULL;
2302
2303 sdata = vif_to_sdata(vif);
2304 ifmgd = &sdata->u.mgd;
2305 local = sdata->local;
2306
2307 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2308 if (!skb) {
2309 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2310 "pspoll template\n", sdata->name);
2311 return NULL;
2312 }
2313 skb_reserve(skb, local->hw.extra_tx_headroom);
2314
2315 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2316 memset(pspoll, 0, sizeof(*pspoll));
2317 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2318 IEEE80211_STYPE_PSPOLL);
2319 pspoll->aid = cpu_to_le16(ifmgd->aid);
2320
2321 /* aid in PS-Poll has its two MSBs each set to 1 */
2322 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2323
2324 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2325 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2326
2327 return skb;
2328 }
2329 EXPORT_SYMBOL(ieee80211_pspoll_get);
2330
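/*
 * Build a nullfunc data frame template (ToDS, addressed to the AP)
 * that drivers can use for power save signalling.
 */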
2331 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2332 struct ieee80211_vif *vif)
2333 {
2334 struct ieee80211_hdr_3addr *nullfunc;
2335 struct ieee80211_sub_if_data *sdata;
2336 struct ieee80211_if_managed *ifmgd;
2337 struct ieee80211_local *local;
2338 struct sk_buff *skb;
2339
2340 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2341 return NULL;
2342
2343 sdata = vif_to_sdata(vif);
2344 ifmgd = &sdata->u.mgd;
2345 local = sdata->local;
2346
2347 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2348 if (!skb) {
2349 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2350 "template\n", sdata->name);
2351 return NULL;
2352 }
2353 skb_reserve(skb, local->hw.extra_tx_headroom);
2354
2355 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2356 sizeof(*nullfunc));
2357 memset(nullfunc, 0, sizeof(*nullfunc));
2358 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2359 IEEE80211_STYPE_NULLFUNC |
2360 IEEE80211_FCTL_TODS);
2361 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2362 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2363 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2364
2365 return skb;
2366 }
2367 EXPORT_SYMBOL(ieee80211_nullfunc_get);
2368
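/*
 * Build a probe request template: broadcast DA and BSSID, the given
 * SSID element and any extra IEs supplied by the caller.
 */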
2369 struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2370 struct ieee80211_vif *vif,
2371 const u8 *ssid, size_t ssid_len,
2372 const u8 *ie, size_t ie_len)
2373 {
2374 struct ieee80211_sub_if_data *sdata;
2375 struct ieee80211_local *local;
2376 struct ieee80211_hdr_3addr *hdr;
2377 struct sk_buff *skb;
2378 size_t ie_ssid_len;
2379 u8 *pos;
2380
2381 sdata = vif_to_sdata(vif);
2382 local = sdata->local;
2383 ie_ssid_len = 2 + ssid_len;
2384
2385 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2386 ie_ssid_len + ie_len);
2387 if (!skb) {
2388 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2389 "request template\n", sdata->name);
2390 return NULL;
2391 }
2392
2393 skb_reserve(skb, local->hw.extra_tx_headroom);
2394
2395 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2396 memset(hdr, 0, sizeof(*hdr));
2397 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2398 IEEE80211_STYPE_PROBE_REQ);
2399 memset(hdr->addr1, 0xff, ETH_ALEN);
2400 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2401 memset(hdr->addr3, 0xff, ETH_ALEN);
2402
2403 pos = skb_put(skb, ie_ssid_len);
2404 *pos++ = WLAN_EID_SSID;
2405 *pos++ = ssid_len;
2406 if (ssid)
2407 memcpy(pos, ssid, ssid_len);
2408 pos += ssid_len;
2409
2410 if (ie) {
2411 pos = skb_put(skb, ie_len);
2412 memcpy(pos, ie, ie_len);
2413 }
2414
2415 return skb;
2416 }
2417 EXPORT_SYMBOL(ieee80211_probereq_get);
2418
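/*
 * Fill in an RTS frame (frame control, duration, RA, TA) protecting
 * the given frame.
 */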
2419 void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2420 const void *frame, size_t frame_len,
2421 const struct ieee80211_tx_info *frame_txctl,
2422 struct ieee80211_rts *rts)
2423 {
2424 const struct ieee80211_hdr *hdr = frame;
2425
2426 rts->frame_control =
2427 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
2428 rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
2429 frame_txctl);
2430 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
2431 memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
2432 }
2433 EXPORT_SYMBOL(ieee80211_rts_get);
2434
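/*
 * Fill in a CTS-to-self frame (frame control, duration, RA) protecting
 * the given frame.
 */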
2435 void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2436 const void *frame, size_t frame_len,
2437 const struct ieee80211_tx_info *frame_txctl,
2438 struct ieee80211_cts *cts)
2439 {
2440 const struct ieee80211_hdr *hdr = frame;
2441
2442 cts->frame_control =
2443 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
2444 cts->duration = ieee80211_ctstoself_duration(hw, vif,
2445 frame_len, frame_txctl);
2446 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
2447 }
2448 EXPORT_SYMBOL(ieee80211_ctstoself_get);
2449
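/*
 * Hand the next buffered broadcast/multicast frame to the driver; only
 * valid for AP interfaces and only right after a DTIM beacon. The
 * MoreData bit is set as long as further frames remain buffered.
 */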
2450 struct sk_buff *
2451 ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2452 struct ieee80211_vif *vif)
2453 {
2454 struct ieee80211_local *local = hw_to_local(hw);
2455 struct sk_buff *skb = NULL;
2456 struct sta_info *sta;
2457 struct ieee80211_tx_data tx;
2458 struct ieee80211_sub_if_data *sdata;
2459 struct ieee80211_if_ap *bss = NULL;
2460 struct beacon_data *beacon;
2461 struct ieee80211_tx_info *info;
2462
2463 sdata = vif_to_sdata(vif);
2464 bss = &sdata->u.ap;
2465
2466 rcu_read_lock();
2467 beacon = rcu_dereference(bss->beacon);
2468
2469 if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
2470 goto out;
2471
2472 if (bss->dtim_count != 0)
2473 goto out; /* send buffered bc/mc only after DTIM beacon */
2474
2475 while (1) {
2476 skb = skb_dequeue(&bss->ps_bc_buf);
2477 if (!skb)
2478 goto out;
2479 local->total_ps_buffered--;
2480
2481 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
2482 struct ieee80211_hdr *hdr =
2483 (struct ieee80211_hdr *) skb->data;
2484 /* more buffered multicast/broadcast frames ==> set
2485 * MoreData flag in IEEE 802.11 header to inform PS
2486 * STAs */
2487 hdr->frame_control |=
2488 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2489 }
2490
2491 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2492 break;
2493 dev_kfree_skb_any(skb);
2494 }
2495
2496 info = IEEE80211_SKB_CB(skb);
2497
2498 sta = tx.sta;
2499 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2500 tx.channel = local->hw.conf.channel;
2501 info->band = tx.channel->band;
2502
2503 if (invoke_tx_handlers(&tx))
2504 skb = NULL;
2505 out:
2506 rcu_read_unlock();
2507
2508 return skb;
2509 }
2510 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2511
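/*
 * Transmit an internally generated frame: reset the header pointers,
 * map it to the VO queue and hand it to ieee80211_xmit with bottom
 * halves disabled.
 */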
2512 void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2513 {
2514 skb_set_mac_header(skb, 0);
2515 skb_set_network_header(skb, 0);
2516 skb_set_transport_header(skb, 0);
2517
2518 /* send all internal mgmt frames on VO */
2519 skb_set_queue_mapping(skb, 0);
2520
2521 /*
2522 	 * The other path calling ieee80211_xmit is from the tasklet, and
2523 	 * while we can handle concurrent transmissions, the locking requires
2524 	 * that we do not enter the TX path with bottom halves enabled.
2525 */
2526 local_bh_disable();
2527 ieee80211_xmit(sdata, skb);
2528 local_bh_enable();
2529 }