net/mac80211/tx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 *
12 * Transmit and frame generation functions.
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/etherdevice.h>
19 #include <linux/bitmap.h>
20 #include <linux/rcupdate.h>
21 #include <linux/export.h>
22 #include <net/net_namespace.h>
23 #include <net/ieee80211_radiotap.h>
24 #include <net/cfg80211.h>
25 #include <net/mac80211.h>
26 #include <asm/unaligned.h>
27
28 #include "ieee80211_i.h"
29 #include "driver-ops.h"
30 #include "led.h"
31 #include "mesh.h"
32 #include "wep.h"
33 #include "wpa.h"
34 #include "wme.h"
35 #include "rate.h"
36
37 /* misc utils */
38
39 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
40 struct sk_buff *skb, int group_addr,
41 int next_frag_len)
42 {
43 int rate, mrate, erp, dur, i, shift = 0;
44 struct ieee80211_rate *txrate;
45 struct ieee80211_local *local = tx->local;
46 struct ieee80211_supported_band *sband;
47 struct ieee80211_hdr *hdr;
48 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
49 struct ieee80211_chanctx_conf *chanctx_conf;
50 u32 rate_flags = 0;
51
52 rcu_read_lock();
53 chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
54 if (chanctx_conf) {
55 shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
56 rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
57 }
58 rcu_read_unlock();
59
60 /* assume HW handles this */
61 if (tx->rate.flags & IEEE80211_TX_RC_MCS)
62 return 0;
63
   64 	/* negative rate index - shouldn't happen */
65 if (WARN_ON_ONCE(tx->rate.idx < 0))
66 return 0;
67
68 sband = local->hw.wiphy->bands[info->band];
69 txrate = &sband->bitrates[tx->rate.idx];
70
71 erp = txrate->flags & IEEE80211_RATE_ERP_G;
72
73 /*
74 * data and mgmt (except PS Poll):
75 * - during CFP: 32768
76 * - during contention period:
77 * if addr1 is group address: 0
78 * if more fragments = 0 and addr1 is individual address: time to
79 * transmit one ACK plus SIFS
80 * if more fragments = 1 and addr1 is individual address: time to
81 * transmit next fragment plus 2 x ACK plus 3 x SIFS
82 *
83 * IEEE 802.11, 9.6:
84 * - control response frame (CTS or ACK) shall be transmitted using the
85 * same rate as the immediately previous frame in the frame exchange
86 * sequence, if this rate belongs to the PHY mandatory rates, or else
87 * at the highest possible rate belonging to the PHY rates in the
88 * BSSBasicRateSet
89 */
90 hdr = (struct ieee80211_hdr *)skb->data;
91 if (ieee80211_is_ctl(hdr->frame_control)) {
92 /* TODO: These control frames are not currently sent by
93 * mac80211, but should they be implemented, this function
94 * needs to be updated to support duration field calculation.
95 *
96 * RTS: time needed to transmit pending data/mgmt frame plus
97 * one CTS frame plus one ACK frame plus 3 x SIFS
98 * CTS: duration of immediately previous RTS minus time
99 * required to transmit CTS and its SIFS
100 * ACK: 0 if immediately previous directed data/mgmt had
101 * more=0, with more=1 duration in ACK frame is duration
102 * from previous frame minus time needed to transmit ACK
103 * and its SIFS
104 * PS Poll: BIT(15) | BIT(14) | aid
105 */
106 return 0;
107 }
108
109 /* data/mgmt */
110 if (0 /* FIX: data/mgmt during CFP */)
111 return cpu_to_le16(32768);
112
113 if (group_addr) /* Group address as the destination - no ACK */
114 return 0;
115
116 /* Individual destination address:
117 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
118 * CTS and ACK frames shall be transmitted using the highest rate in
119 * basic rate set that is less than or equal to the rate of the
120 * immediately previous frame and that is using the same modulation
121 * (CCK or OFDM). If no basic rate set matches with these requirements,
122 * the highest mandatory rate of the PHY that is less than or equal to
123 * the rate of the previous frame is used.
124 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
125 */
126 rate = -1;
127 /* use lowest available if everything fails */
128 mrate = sband->bitrates[0].bitrate;
129 for (i = 0; i < sband->n_bitrates; i++) {
130 struct ieee80211_rate *r = &sband->bitrates[i];
131
132 if (r->bitrate > txrate->bitrate)
133 break;
134
135 if ((rate_flags & r->flags) != rate_flags)
136 continue;
137
138 if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
139 rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
140
141 switch (sband->band) {
142 case IEEE80211_BAND_2GHZ: {
143 u32 flag;
144 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
145 flag = IEEE80211_RATE_MANDATORY_G;
146 else
147 flag = IEEE80211_RATE_MANDATORY_B;
148 if (r->flags & flag)
149 mrate = r->bitrate;
150 break;
151 }
152 case IEEE80211_BAND_5GHZ:
153 if (r->flags & IEEE80211_RATE_MANDATORY_A)
154 mrate = r->bitrate;
155 break;
156 case IEEE80211_BAND_60GHZ:
157 /* TODO, for now fall through */
158 case IEEE80211_NUM_BANDS:
159 WARN_ON(1);
160 break;
161 }
162 }
163 if (rate == -1) {
164 /* No matching basic rate found; use highest suitable mandatory
165 * PHY rate */
166 rate = DIV_ROUND_UP(mrate, 1 << shift);
167 }
168
169 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
170 if (ieee80211_is_data_qos(hdr->frame_control) &&
171 *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
172 dur = 0;
173 else
174 /* Time needed to transmit ACK
175 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
176 * to closest integer */
177 dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
178 tx->sdata->vif.bss_conf.use_short_preamble,
179 shift);
180
181 if (next_frag_len) {
182 /* Frame is fragmented: duration increases with time needed to
183 * transmit next fragment plus ACK and 2 x SIFS. */
184 dur *= 2; /* ACK + SIFS */
185 /* next fragment */
186 dur += ieee80211_frame_duration(sband->band, next_frag_len,
187 txrate->bitrate, erp,
188 tx->sdata->vif.bss_conf.use_short_preamble,
189 shift);
190 }
191
192 return cpu_to_le16(dur);
193 }
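/*
 * Illustrative sketch (editor's addition, not part of mac80211): the
 * basic-rate selection loop above boils down to "pick the highest basic
 * rate that is not faster than the TX rate".  Assuming a hypothetical
 * sorted rate table in 100 kbit/s units and a basic-rate bitmap, the core
 * of it can be written standalone as:
 *
 *	// returns the control-response rate, or -1 if no basic rate fits
 *	static int pick_resp_rate(const u16 *bitrates, int n_bitrates,
 *				  u32 basic_rates, u16 txrate)
 *	{
 *		int i, rate = -1;
 *
 *		for (i = 0; i < n_bitrates; i++) {
 *			if (bitrates[i] > txrate)
 *				break;
 *			if (basic_rates & BIT(i))
 *				rate = bitrates[i];
 *		}
 *		return rate;
 *	}
 *
 * For example, bitrates = {10, 20, 55, 110, 240}, basic_rates = 0x0b
 * (1, 2 and 11 Mbit/s) and txrate = 240 yields 110 (11 Mbit/s), the
 * highest basic rate not above the TX rate; ieee80211_duration() falls
 * back to the mandatory-rate table only when no basic rate matches.
 */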
194
195 /* tx handlers */
196 static ieee80211_tx_result debug_noinline
197 ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
198 {
199 struct ieee80211_local *local = tx->local;
200 struct ieee80211_if_managed *ifmgd;
201
202 /* driver doesn't support power save */
203 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
204 return TX_CONTINUE;
205
206 /* hardware does dynamic power save */
207 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
208 return TX_CONTINUE;
209
210 /* dynamic power save disabled */
211 if (local->hw.conf.dynamic_ps_timeout <= 0)
212 return TX_CONTINUE;
213
214 /* we are scanning, don't enable power save */
215 if (local->scanning)
216 return TX_CONTINUE;
217
218 if (!local->ps_sdata)
219 return TX_CONTINUE;
220
221 /* No point if we're going to suspend */
222 if (local->quiescing)
223 return TX_CONTINUE;
224
225 /* dynamic ps is supported only in managed mode */
226 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
227 return TX_CONTINUE;
228
229 ifmgd = &tx->sdata->u.mgd;
230
231 /*
  232 	 * Don't wake up from power save if U-APSD is enabled, the voice AC
  233 	 * has U-APSD enabled and the frame is in the voice class. This
  234 	 * effectively means that even if all access categories have U-APSD
  235 	 * enabled, in practice U-APSD is only used with the voice AC. This
  236 	 * is a workaround for the case when received voice-class packets do
  237 	 * not have a correct QoS tag for some reason, due to the network or
  238 	 * the peer application.
239 *
240 * Note: ifmgd->uapsd_queues access is racy here. If the value is
241 * changed via debugfs, user needs to reassociate manually to have
242 * everything in sync.
243 */
244 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
245 (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
246 skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
247 return TX_CONTINUE;
248
249 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
250 ieee80211_stop_queues_by_reason(&local->hw,
251 IEEE80211_MAX_QUEUE_MAP,
252 IEEE80211_QUEUE_STOP_REASON_PS);
253 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
254 ieee80211_queue_work(&local->hw,
255 &local->dynamic_ps_disable_work);
256 }
257
  258 	/* Don't restart the timer if we're not associated */
259 if (!ifmgd->associated)
260 return TX_CONTINUE;
261
262 mod_timer(&local->dynamic_ps_timer, jiffies +
263 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
264
265 return TX_CONTINUE;
266 }
267
268 static ieee80211_tx_result debug_noinline
269 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
270 {
271
272 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
273 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
274 bool assoc = false;
275
276 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
277 return TX_CONTINUE;
278
279 if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
280 test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
281 !ieee80211_is_probe_req(hdr->frame_control) &&
282 !ieee80211_is_nullfunc(hdr->frame_control))
283 /*
  284 		 * When software scanning, only nullfunc frames (to notify
  285 		 * the AP of our sleep state) and probe requests (for the
  286 		 * active scan) are allowed; all other frames should not be
  287 		 * sent and we should not get here. If we do
  288 		 * nonetheless, drop them to avoid sending them
  289 		 * off-channel. See the link below and
  290 		 * ieee80211_start_scan() for more.
291 *
292 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
293 */
294 return TX_DROP;
295
296 if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
297 return TX_CONTINUE;
298
299 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
300 return TX_CONTINUE;
301
302 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
303 return TX_CONTINUE;
304
305 if (tx->sta)
306 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
307
308 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
309 if (unlikely(!assoc &&
310 ieee80211_is_data(hdr->frame_control))) {
311 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
312 sdata_info(tx->sdata,
313 "dropped data frame to not associated station %pM\n",
314 hdr->addr1);
315 #endif
316 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
317 return TX_DROP;
318 }
319 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
320 ieee80211_is_data(hdr->frame_control) &&
321 !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) {
322 /*
323 * No associated STAs - no need to send multicast
324 * frames.
325 */
326 return TX_DROP;
327 }
328
329 return TX_CONTINUE;
330 }
331
332 /* This function is called whenever the AP is about to exceed the maximum limit
333 * of buffered frames for power saving STAs. This situation should not really
334 * happen often during normal operation, so dropping the oldest buffered packet
335 * from each queue should be OK to make some room for new frames. */
336 static void purge_old_ps_buffers(struct ieee80211_local *local)
337 {
338 int total = 0, purged = 0;
339 struct sk_buff *skb;
340 struct ieee80211_sub_if_data *sdata;
341 struct sta_info *sta;
342
343 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
344 struct ps_data *ps;
345
346 if (sdata->vif.type == NL80211_IFTYPE_AP)
347 ps = &sdata->u.ap.ps;
348 else if (ieee80211_vif_is_mesh(&sdata->vif))
349 ps = &sdata->u.mesh.ps;
350 else
351 continue;
352
353 skb = skb_dequeue(&ps->bc_buf);
354 if (skb) {
355 purged++;
356 dev_kfree_skb(skb);
357 }
358 total += skb_queue_len(&ps->bc_buf);
359 }
360
361 /*
362 * Drop one frame from each station from the lowest-priority
363 * AC that has frames at all.
364 */
365 list_for_each_entry_rcu(sta, &local->sta_list, list) {
366 int ac;
367
368 for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
369 skb = skb_dequeue(&sta->ps_tx_buf[ac]);
370 total += skb_queue_len(&sta->ps_tx_buf[ac]);
371 if (skb) {
372 purged++;
373 ieee80211_free_txskb(&local->hw, skb);
374 break;
375 }
376 }
377 }
378
379 local->total_ps_buffered = total;
380 ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
381 }
382
383 static ieee80211_tx_result
384 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
385 {
386 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
387 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
388 struct ps_data *ps;
389
390 /*
391 * broadcast/multicast frame
392 *
393 * If any of the associated/peer stations is in power save mode,
394 * the frame is buffered to be sent after DTIM beacon frame.
395 * This is done either by the hardware or us.
396 */
397
398 /* powersaving STAs currently only in AP/VLAN/mesh mode */
399 if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
400 tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
401 if (!tx->sdata->bss)
402 return TX_CONTINUE;
403
404 ps = &tx->sdata->bss->ps;
405 } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
406 ps = &tx->sdata->u.mesh.ps;
407 } else {
408 return TX_CONTINUE;
409 }
410
411
412 /* no buffering for ordered frames */
413 if (ieee80211_has_order(hdr->frame_control))
414 return TX_CONTINUE;
415
416 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
417 info->hw_queue = tx->sdata->vif.cab_queue;
418
419 /* no stations in PS mode */
420 if (!atomic_read(&ps->num_sta_ps))
421 return TX_CONTINUE;
422
423 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
424
425 /* device releases frame after DTIM beacon */
426 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
427 return TX_CONTINUE;
428
429 /* buffered in mac80211 */
430 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
431 purge_old_ps_buffers(tx->local);
432
433 if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
434 ps_dbg(tx->sdata,
435 "BC TX buffer full - dropping the oldest frame\n");
436 dev_kfree_skb(skb_dequeue(&ps->bc_buf));
437 } else
438 tx->local->total_ps_buffered++;
439
440 skb_queue_tail(&ps->bc_buf, tx->skb);
441
442 return TX_QUEUED;
443 }
444
445 static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
446 struct sk_buff *skb)
447 {
448 if (!ieee80211_is_mgmt(fc))
449 return 0;
450
451 if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
452 return 0;
453
454 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
455 skb->data))
456 return 0;
457
458 return 1;
459 }
460
461 static ieee80211_tx_result
462 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
463 {
464 struct sta_info *sta = tx->sta;
465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
466 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
467 struct ieee80211_local *local = tx->local;
468
469 if (unlikely(!sta))
470 return TX_CONTINUE;
471
472 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
473 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
474 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
475 int ac = skb_get_queue_mapping(tx->skb);
476
477 /* only deauth, disassoc and action are bufferable MMPDUs */
478 if (ieee80211_is_mgmt(hdr->frame_control) &&
479 !ieee80211_is_deauth(hdr->frame_control) &&
480 !ieee80211_is_disassoc(hdr->frame_control) &&
481 !ieee80211_is_action(hdr->frame_control)) {
482 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
483 return TX_CONTINUE;
484 }
485
486 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
487 sta->sta.addr, sta->sta.aid, ac);
488 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
489 purge_old_ps_buffers(tx->local);
490 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
491 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
492 ps_dbg(tx->sdata,
493 "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
494 sta->sta.addr, ac);
495 ieee80211_free_txskb(&local->hw, old);
496 } else
497 tx->local->total_ps_buffered++;
498
499 info->control.jiffies = jiffies;
500 info->control.vif = &tx->sdata->vif;
501 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
502 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
503
504 if (!timer_pending(&local->sta_cleanup))
505 mod_timer(&local->sta_cleanup,
506 round_jiffies(jiffies +
507 STA_INFO_CLEANUP_INTERVAL));
508
509 /*
510 * We queued up some frames, so the TIM bit might
511 * need to be set, recalculate it.
512 */
513 sta_info_recalc_tim(sta);
514
515 return TX_QUEUED;
516 } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
517 ps_dbg(tx->sdata,
518 "STA %pM in PS mode, but polling/in SP -> send frame\n",
519 sta->sta.addr);
520 }
521
522 return TX_CONTINUE;
523 }
524
525 static ieee80211_tx_result debug_noinline
526 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
527 {
528 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
529 return TX_CONTINUE;
530
531 if (tx->flags & IEEE80211_TX_UNICAST)
532 return ieee80211_tx_h_unicast_ps_buf(tx);
533 else
534 return ieee80211_tx_h_multicast_ps_buf(tx);
535 }
536
537 static ieee80211_tx_result debug_noinline
538 ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
539 {
540 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
541
542 if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
543 tx->sdata->control_port_no_encrypt))
544 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
545
546 return TX_CONTINUE;
547 }
548
549 static ieee80211_tx_result debug_noinline
550 ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
551 {
552 struct ieee80211_key *key;
553 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
554 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
555
556 if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
557 tx->key = NULL;
558 else if (tx->sta && (key = rcu_dereference(tx->sta->ptk)))
559 tx->key = key;
560 else if (ieee80211_is_mgmt(hdr->frame_control) &&
561 is_multicast_ether_addr(hdr->addr1) &&
562 ieee80211_is_robust_mgmt_frame(hdr) &&
563 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
564 tx->key = key;
565 else if (is_multicast_ether_addr(hdr->addr1) &&
566 (key = rcu_dereference(tx->sdata->default_multicast_key)))
567 tx->key = key;
568 else if (!is_multicast_ether_addr(hdr->addr1) &&
569 (key = rcu_dereference(tx->sdata->default_unicast_key)))
570 tx->key = key;
571 else if (info->flags & IEEE80211_TX_CTL_INJECTED)
572 tx->key = NULL;
573 else if (!tx->sdata->drop_unencrypted)
574 tx->key = NULL;
575 else if (tx->skb->protocol == tx->sdata->control_port_protocol)
576 tx->key = NULL;
577 else if (ieee80211_is_robust_mgmt_frame(hdr) &&
578 !(ieee80211_is_action(hdr->frame_control) &&
579 tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
580 tx->key = NULL;
581 else if (ieee80211_is_mgmt(hdr->frame_control) &&
582 !ieee80211_is_robust_mgmt_frame(hdr))
583 tx->key = NULL;
584 else {
585 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
586 return TX_DROP;
587 }
588
589 if (tx->key) {
590 bool skip_hw = false;
591
592 tx->key->tx_rx_count++;
593 /* TODO: add threshold stuff again */
594
595 switch (tx->key->conf.cipher) {
596 case WLAN_CIPHER_SUITE_WEP40:
597 case WLAN_CIPHER_SUITE_WEP104:
598 case WLAN_CIPHER_SUITE_TKIP:
599 if (!ieee80211_is_data_present(hdr->frame_control))
600 tx->key = NULL;
601 break;
602 case WLAN_CIPHER_SUITE_CCMP:
603 if (!ieee80211_is_data_present(hdr->frame_control) &&
604 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
605 tx->skb))
606 tx->key = NULL;
607 else
608 skip_hw = (tx->key->conf.flags &
609 IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
610 ieee80211_is_mgmt(hdr->frame_control);
611 break;
612 case WLAN_CIPHER_SUITE_AES_CMAC:
613 if (!ieee80211_is_mgmt(hdr->frame_control))
614 tx->key = NULL;
615 break;
616 }
617
618 if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
619 !ieee80211_is_deauth(hdr->frame_control)))
620 return TX_DROP;
621
622 if (!skip_hw && tx->key &&
623 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
624 info->control.hw_key = &tx->key->conf;
625 }
626
627 return TX_CONTINUE;
628 }
629
630 static ieee80211_tx_result debug_noinline
631 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
632 {
633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
634 struct ieee80211_hdr *hdr = (void *)tx->skb->data;
635 struct ieee80211_supported_band *sband;
636 u32 len;
637 struct ieee80211_tx_rate_control txrc;
638 struct ieee80211_sta_rates *ratetbl = NULL;
639 bool assoc = false;
640
641 memset(&txrc, 0, sizeof(txrc));
642
643 sband = tx->local->hw.wiphy->bands[info->band];
644
645 len = min_t(u32, tx->skb->len + FCS_LEN,
646 tx->local->hw.wiphy->frag_threshold);
647
648 /* set up the tx rate control struct we give the RC algo */
649 txrc.hw = &tx->local->hw;
650 txrc.sband = sband;
651 txrc.bss_conf = &tx->sdata->vif.bss_conf;
652 txrc.skb = tx->skb;
653 txrc.reported_rate.idx = -1;
654 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
655 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
656 txrc.max_rate_idx = -1;
657 else
658 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
659
660 if (tx->sdata->rc_has_mcs_mask[info->band])
661 txrc.rate_idx_mcs_mask =
662 tx->sdata->rc_rateidx_mcs_mask[info->band];
663
664 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
665 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
666 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
667
668 /* set up RTS protection if desired */
669 if (len > tx->local->hw.wiphy->rts_threshold) {
670 txrc.rts = true;
671 }
672
673 info->control.use_rts = txrc.rts;
674 info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;
675
676 /*
677 * Use short preamble if the BSS can handle it, but not for
678 * management frames unless we know the receiver can handle
679 * that -- the management frame might be to a station that
680 * just wants a probe response.
681 */
682 if (tx->sdata->vif.bss_conf.use_short_preamble &&
683 (ieee80211_is_data(hdr->frame_control) ||
684 (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
685 txrc.short_preamble = true;
686
687 info->control.short_preamble = txrc.short_preamble;
688
689 if (tx->sta)
690 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
691
692 /*
  693 	 * Let's not bother with rate control if we're associated and cannot
694 * talk to the sta. This should not happen.
695 */
696 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
697 !rate_usable_index_exists(sband, &tx->sta->sta),
698 "%s: Dropped data frame as no usable bitrate found while "
699 "scanning and associated. Target station: "
700 "%pM on %d GHz band\n",
701 tx->sdata->name, hdr->addr1,
702 info->band ? 5 : 2))
703 return TX_DROP;
704
705 /*
706 * If we're associated with the sta at this point we know we can at
707 * least send the frame at the lowest bit rate.
708 */
709 rate_control_get_rate(tx->sdata, tx->sta, &txrc);
710
711 if (tx->sta && !info->control.skip_table)
712 ratetbl = rcu_dereference(tx->sta->sta.rates);
713
714 if (unlikely(info->control.rates[0].idx < 0)) {
715 if (ratetbl) {
716 struct ieee80211_tx_rate rate = {
717 .idx = ratetbl->rate[0].idx,
718 .flags = ratetbl->rate[0].flags,
719 .count = ratetbl->rate[0].count
720 };
721
722 if (ratetbl->rate[0].idx < 0)
723 return TX_DROP;
724
725 tx->rate = rate;
726 } else {
727 return TX_DROP;
728 }
729 } else {
730 tx->rate = info->control.rates[0];
731 }
732
733 if (txrc.reported_rate.idx < 0) {
734 txrc.reported_rate = tx->rate;
735 if (tx->sta && ieee80211_is_data(hdr->frame_control))
736 tx->sta->last_tx_rate = txrc.reported_rate;
737 } else if (tx->sta)
738 tx->sta->last_tx_rate = txrc.reported_rate;
739
740 if (ratetbl)
741 return TX_CONTINUE;
742
743 if (unlikely(!info->control.rates[0].count))
744 info->control.rates[0].count = 1;
745
746 if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
747 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
748 info->control.rates[0].count = 1;
749
750 return TX_CONTINUE;
751 }
752
753 static ieee80211_tx_result debug_noinline
754 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
755 {
756 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
757 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
758 u16 *seq;
759 u8 *qc;
760 int tid;
761
762 /*
763 * Packet injection may want to control the sequence
  764 	 * number; if we have no matching interface then we
765 * neither assign one ourselves nor ask the driver to.
766 */
767 if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
768 return TX_CONTINUE;
769
770 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
771 return TX_CONTINUE;
772
773 if (ieee80211_hdrlen(hdr->frame_control) < 24)
774 return TX_CONTINUE;
775
776 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
777 return TX_CONTINUE;
778
779 /*
780 * Anything but QoS data that has a sequence number field
  781 	 * (i.e. is long enough) gets a sequence number from the global
782 * counter.
783 */
784 if (!ieee80211_is_data_qos(hdr->frame_control)) {
785 /* driver should assign sequence number */
786 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
787 /* for pure STA mode without beacons, we can do it */
788 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
789 tx->sdata->sequence_number += 0x10;
790 return TX_CONTINUE;
791 }
792
793 /*
  794 	 * This should be true for injected/management frames only; for
795 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
796 * above since they are not QoS-data frames.
797 */
798 if (!tx->sta)
799 return TX_CONTINUE;
800
801 /* include per-STA, per-TID sequence counter */
802
803 qc = ieee80211_get_qos_ctl(hdr);
804 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
805 seq = &tx->sta->tid_seq[tid];
806
807 hdr->seq_ctrl = cpu_to_le16(*seq);
808
809 /* Increase the sequence number. */
810 *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
811
812 return TX_CONTINUE;
813 }
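/*
 * Illustrative sketch (editor's addition, not part of mac80211): the
 * sequence control field packs the fragment number into its low four bits
 * and the sequence number into the upper twelve, which is why the
 * counters above advance in steps of 0x10 and wrap with
 * IEEE80211_SCTL_SEQ.  A hypothetical helper building the field from its
 * two parts:
 *
 *	static __le16 build_seq_ctrl(u16 seq, u8 frag)
 *	{
 *		return cpu_to_le16(((seq << 4) & IEEE80211_SCTL_SEQ) |
 *				   (frag & IEEE80211_SCTL_FRAG));
 *	}
 *
 * e.g. seq = 5, frag = 2 gives 0x0052; ieee80211_tx_h_fragment() below
 * relies on the same layout when it ORs the fragment number into
 * hdr->seq_ctrl.
 */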
814
815 static int ieee80211_fragment(struct ieee80211_tx_data *tx,
816 struct sk_buff *skb, int hdrlen,
817 int frag_threshold)
818 {
819 struct ieee80211_local *local = tx->local;
820 struct ieee80211_tx_info *info;
821 struct sk_buff *tmp;
822 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
823 int pos = hdrlen + per_fragm;
824 int rem = skb->len - hdrlen - per_fragm;
825
826 if (WARN_ON(rem < 0))
827 return -EINVAL;
828
829 /* first fragment was already added to queue by caller */
830
831 while (rem) {
832 int fraglen = per_fragm;
833
834 if (fraglen > rem)
835 fraglen = rem;
836 rem -= fraglen;
837 tmp = dev_alloc_skb(local->tx_headroom +
838 frag_threshold +
839 IEEE80211_ENCRYPT_HEADROOM +
840 IEEE80211_ENCRYPT_TAILROOM);
841 if (!tmp)
842 return -ENOMEM;
843
844 __skb_queue_tail(&tx->skbs, tmp);
845
846 skb_reserve(tmp, local->tx_headroom +
847 IEEE80211_ENCRYPT_HEADROOM);
848 /* copy control information */
849 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
850
851 info = IEEE80211_SKB_CB(tmp);
852 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
853 IEEE80211_TX_CTL_FIRST_FRAGMENT);
854
855 if (rem)
856 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
857
858 skb_copy_queue_mapping(tmp, skb);
859 tmp->priority = skb->priority;
860 tmp->dev = skb->dev;
861
862 /* copy header and data */
863 memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
864 memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
865
866 pos += fraglen;
867 }
868
869 /* adjust first fragment's length */
870 skb->len = hdrlen + per_fragm;
871 return 0;
872 }
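/*
 * Illustrative sketch (editor's addition, not part of mac80211): the
 * arithmetic above means each fragment carries at most
 * frag_threshold - hdrlen - FCS_LEN payload bytes.  Assuming hypothetical
 * values frag_threshold = 256 and hdrlen = 24 (with FCS_LEN = 4), that is
 * 228 bytes per fragment, and the fragment count follows as:
 *
 *	static int nr_fragments(int skb_len, int hdrlen, int frag_threshold)
 *	{
 *		int per_fragm = frag_threshold - hdrlen - FCS_LEN;
 *
 *		return DIV_ROUND_UP(skb_len - hdrlen, per_fragm);
 *	}
 *
 * e.g. a 1024-byte frame (1000 bytes of payload after the 24-byte header)
 * splits into DIV_ROUND_UP(1000, 228) = 5 fragments, the first of which
 * is the trimmed original skb.
 */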
873
874 static ieee80211_tx_result debug_noinline
875 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
876 {
877 struct sk_buff *skb = tx->skb;
878 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
879 struct ieee80211_hdr *hdr = (void *)skb->data;
880 int frag_threshold = tx->local->hw.wiphy->frag_threshold;
881 int hdrlen;
882 int fragnum;
883
884 /* no matter what happens, tx->skb moves to tx->skbs */
885 __skb_queue_tail(&tx->skbs, skb);
886 tx->skb = NULL;
887
888 if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
889 return TX_CONTINUE;
890
891 if (tx->local->ops->set_frag_threshold)
892 return TX_CONTINUE;
893
894 /*
895 * Warn when submitting a fragmented A-MPDU frame and drop it.
896 * This scenario is handled in ieee80211_tx_prepare but extra
  897 	 * caution is taken here as a fragmented A-MPDU may cause Tx stop.
898 */
899 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
900 return TX_DROP;
901
902 hdrlen = ieee80211_hdrlen(hdr->frame_control);
903
904 /* internal error, why isn't DONTFRAG set? */
905 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
906 return TX_DROP;
907
908 /*
909 * Now fragment the frame. This will allocate all the fragments and
  910 	 * queue them (with skb as the first fragment) on tx->skbs.
911 * During transmission, we will remove the successfully transmitted
912 * fragments from this list. When the low-level driver rejects one
913 * of the fragments then we will simply pretend to accept the skb
914 * but store it away as pending.
915 */
916 if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
917 return TX_DROP;
918
919 /* update duration/seq/flags of fragments */
920 fragnum = 0;
921
922 skb_queue_walk(&tx->skbs, skb) {
923 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
924
925 hdr = (void *)skb->data;
926 info = IEEE80211_SKB_CB(skb);
927
928 if (!skb_queue_is_last(&tx->skbs, skb)) {
929 hdr->frame_control |= morefrags;
930 /*
  931 			 * No multi-rate retries for fragmented frames; that
932 * would completely throw off the NAV at other STAs.
933 */
934 info->control.rates[1].idx = -1;
935 info->control.rates[2].idx = -1;
936 info->control.rates[3].idx = -1;
937 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
938 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
939 } else {
940 hdr->frame_control &= ~morefrags;
941 }
942 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
943 fragnum++;
944 }
945
946 return TX_CONTINUE;
947 }
948
949 static ieee80211_tx_result debug_noinline
950 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
951 {
952 struct sk_buff *skb;
953 int ac = -1;
954
955 if (!tx->sta)
956 return TX_CONTINUE;
957
958 skb_queue_walk(&tx->skbs, skb) {
959 ac = skb_get_queue_mapping(skb);
960 tx->sta->tx_fragments++;
961 tx->sta->tx_bytes[ac] += skb->len;
962 }
963 if (ac >= 0)
964 tx->sta->tx_packets[ac]++;
965
966 return TX_CONTINUE;
967 }
968
969 static ieee80211_tx_result debug_noinline
970 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
971 {
972 if (!tx->key)
973 return TX_CONTINUE;
974
975 switch (tx->key->conf.cipher) {
976 case WLAN_CIPHER_SUITE_WEP40:
977 case WLAN_CIPHER_SUITE_WEP104:
978 return ieee80211_crypto_wep_encrypt(tx);
979 case WLAN_CIPHER_SUITE_TKIP:
980 return ieee80211_crypto_tkip_encrypt(tx);
981 case WLAN_CIPHER_SUITE_CCMP:
982 return ieee80211_crypto_ccmp_encrypt(tx);
983 case WLAN_CIPHER_SUITE_AES_CMAC:
984 return ieee80211_crypto_aes_cmac_encrypt(tx);
985 default:
986 return ieee80211_crypto_hw_encrypt(tx);
987 }
988
989 return TX_DROP;
990 }
991
992 static ieee80211_tx_result debug_noinline
993 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
994 {
995 struct sk_buff *skb;
996 struct ieee80211_hdr *hdr;
997 int next_len;
998 bool group_addr;
999
1000 skb_queue_walk(&tx->skbs, skb) {
1001 hdr = (void *) skb->data;
1002 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
1003 break; /* must not overwrite AID */
1004 if (!skb_queue_is_last(&tx->skbs, skb)) {
1005 struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
1006 next_len = next->len;
1007 } else
1008 next_len = 0;
1009 group_addr = is_multicast_ether_addr(hdr->addr1);
1010
1011 hdr->duration_id =
1012 ieee80211_duration(tx, skb, group_addr, next_len);
1013 }
1014
1015 return TX_CONTINUE;
1016 }
1017
1018 /* actual transmit path */
1019
1020 static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1021 struct sk_buff *skb,
1022 struct ieee80211_tx_info *info,
1023 struct tid_ampdu_tx *tid_tx,
1024 int tid)
1025 {
1026 bool queued = false;
1027 bool reset_agg_timer = false;
1028 struct sk_buff *purge_skb = NULL;
1029
1030 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1031 info->flags |= IEEE80211_TX_CTL_AMPDU;
1032 reset_agg_timer = true;
1033 } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
1034 /*
1035 * nothing -- this aggregation session is being started
1036 * but that might still fail with the driver
1037 */
1038 } else {
1039 spin_lock(&tx->sta->lock);
1040 /*
1041 * Need to re-check now, because we may get here
1042 *
1043 * 1) in the window during which the setup is actually
1044 * already done, but not marked yet because not all
1045 * packets are spliced over to the driver pending
1046 * queue yet -- if this happened we acquire the lock
1047 * either before or after the splice happens, but
1048 * need to recheck which of these cases happened.
1049 *
1050 * 2) during session teardown, if the OPERATIONAL bit
1051 * was cleared due to the teardown but the pointer
1052 * hasn't been assigned NULL yet (or we loaded it
1053 * before it was assigned) -- in this case it may
1054 * now be NULL which means we should just let the
1055 * packet pass through because splicing the frames
1056 * back is already done.
1057 */
1058 tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
1059
1060 if (!tid_tx) {
1061 /* do nothing, let packet pass through */
1062 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1063 info->flags |= IEEE80211_TX_CTL_AMPDU;
1064 reset_agg_timer = true;
1065 } else {
1066 queued = true;
1067 info->control.vif = &tx->sdata->vif;
1068 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1069 __skb_queue_tail(&tid_tx->pending, skb);
1070 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1071 purge_skb = __skb_dequeue(&tid_tx->pending);
1072 }
1073 spin_unlock(&tx->sta->lock);
1074
1075 if (purge_skb)
1076 ieee80211_free_txskb(&tx->local->hw, purge_skb);
1077 }
1078
1079 /* reset session timer */
1080 if (reset_agg_timer && tid_tx->timeout)
1081 tid_tx->last_tx = jiffies;
1082
1083 return queued;
1084 }
1085
1086 /*
1087 * initialises @tx
1088 */
1089 static ieee80211_tx_result
1090 ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1091 struct ieee80211_tx_data *tx,
1092 struct sk_buff *skb)
1093 {
1094 struct ieee80211_local *local = sdata->local;
1095 struct ieee80211_hdr *hdr;
1096 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1097 int tid;
1098 u8 *qc;
1099
1100 memset(tx, 0, sizeof(*tx));
1101 tx->skb = skb;
1102 tx->local = local;
1103 tx->sdata = sdata;
1104 __skb_queue_head_init(&tx->skbs);
1105
1106 /*
1107 * If this flag is set to true anywhere, and we get here,
1108 * we are doing the needed processing, so remove the flag
1109 * now.
1110 */
1111 info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1112
1113 hdr = (struct ieee80211_hdr *) skb->data;
1114
1115 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1116 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1117 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1118 return TX_DROP;
1119 } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
1120 tx->sdata->control_port_protocol == tx->skb->protocol) {
1121 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1122 }
1123 if (!tx->sta)
1124 tx->sta = sta_info_get(sdata, hdr->addr1);
1125
1126 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1127 !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
1128 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
1129 !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
1130 struct tid_ampdu_tx *tid_tx;
1131
1132 qc = ieee80211_get_qos_ctl(hdr);
1133 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1134
1135 tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
1136 if (tid_tx) {
1137 bool queued;
1138
1139 queued = ieee80211_tx_prep_agg(tx, skb, info,
1140 tid_tx, tid);
1141
1142 if (unlikely(queued))
1143 return TX_QUEUED;
1144 }
1145 }
1146
1147 if (is_multicast_ether_addr(hdr->addr1)) {
1148 tx->flags &= ~IEEE80211_TX_UNICAST;
1149 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1150 } else
1151 tx->flags |= IEEE80211_TX_UNICAST;
1152
1153 if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
1154 if (!(tx->flags & IEEE80211_TX_UNICAST) ||
1155 skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
1156 info->flags & IEEE80211_TX_CTL_AMPDU)
1157 info->flags |= IEEE80211_TX_CTL_DONTFRAG;
1158 }
1159
1160 if (!tx->sta)
1161 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1162 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1163 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1164
1165 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1166
1167 return TX_CONTINUE;
1168 }
1169
1170 static bool ieee80211_tx_frags(struct ieee80211_local *local,
1171 struct ieee80211_vif *vif,
1172 struct ieee80211_sta *sta,
1173 struct sk_buff_head *skbs,
1174 bool txpending)
1175 {
1176 struct ieee80211_tx_control control;
1177 struct sk_buff *skb, *tmp;
1178 unsigned long flags;
1179
1180 skb_queue_walk_safe(skbs, skb, tmp) {
1181 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1182 int q = info->hw_queue;
1183
1184 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1185 if (WARN_ON_ONCE(q >= local->hw.queues)) {
1186 __skb_unlink(skb, skbs);
1187 ieee80211_free_txskb(&local->hw, skb);
1188 continue;
1189 }
1190 #endif
1191
1192 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1193 if (local->queue_stop_reasons[q] ||
1194 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1195 if (unlikely(info->flags &
1196 IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
1197 if (local->queue_stop_reasons[q] &
1198 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
1199 /*
1200 * Drop off-channel frames if queues
1201 * are stopped for any reason other
1202 * than off-channel operation. Never
1203 * queue them.
1204 */
1205 spin_unlock_irqrestore(
1206 &local->queue_stop_reason_lock,
1207 flags);
1208 ieee80211_purge_tx_queue(&local->hw,
1209 skbs);
1210 return true;
1211 }
1212 } else {
1213
1214 /*
1215 * Since queue is stopped, queue up frames for
1216 * later transmission from the tx-pending
1217 * tasklet when the queue is woken again.
1218 */
1219 if (txpending)
1220 skb_queue_splice_init(skbs,
1221 &local->pending[q]);
1222 else
1223 skb_queue_splice_tail_init(skbs,
1224 &local->pending[q]);
1225
1226 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1227 flags);
1228 return false;
1229 }
1230 }
1231 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1232
1233 info->control.vif = vif;
1234 control.sta = sta;
1235
1236 __skb_unlink(skb, skbs);
1237 drv_tx(local, &control, skb);
1238 }
1239
1240 return true;
1241 }
1242
1243 /*
1244 * Returns false if the frame couldn't be transmitted but was queued instead.
1245 */
1246 static bool __ieee80211_tx(struct ieee80211_local *local,
1247 struct sk_buff_head *skbs, int led_len,
1248 struct sta_info *sta, bool txpending)
1249 {
1250 struct ieee80211_tx_info *info;
1251 struct ieee80211_sub_if_data *sdata;
1252 struct ieee80211_vif *vif;
1253 struct ieee80211_sta *pubsta;
1254 struct sk_buff *skb;
1255 bool result = true;
1256 __le16 fc;
1257
1258 if (WARN_ON(skb_queue_empty(skbs)))
1259 return true;
1260
1261 skb = skb_peek(skbs);
1262 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
1263 info = IEEE80211_SKB_CB(skb);
1264 sdata = vif_to_sdata(info->control.vif);
1265 if (sta && !sta->uploaded)
1266 sta = NULL;
1267
1268 if (sta)
1269 pubsta = &sta->sta;
1270 else
1271 pubsta = NULL;
1272
1273 switch (sdata->vif.type) {
1274 case NL80211_IFTYPE_MONITOR:
1275 sdata = rcu_dereference(local->monitor_sdata);
1276 if (sdata) {
1277 vif = &sdata->vif;
1278 info->hw_queue =
1279 vif->hw_queue[skb_get_queue_mapping(skb)];
1280 } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
1281 dev_kfree_skb(skb);
1282 return true;
1283 } else
1284 vif = NULL;
1285 break;
1286 case NL80211_IFTYPE_AP_VLAN:
1287 sdata = container_of(sdata->bss,
1288 struct ieee80211_sub_if_data, u.ap);
1289 /* fall through */
1290 default:
1291 vif = &sdata->vif;
1292 break;
1293 }
1294
1295 result = ieee80211_tx_frags(local, vif, pubsta, skbs,
1296 txpending);
1297
1298 ieee80211_tpt_led_trig_tx(local, fc, led_len);
1299 ieee80211_led_tx(local, 1);
1300
1301 WARN_ON_ONCE(!skb_queue_empty(skbs));
1302
1303 return result;
1304 }
1305
1306 /*
1307 * Invoke TX handlers, return 0 on success and non-zero if the
1308 * frame was dropped or queued.
1309 */
1310 static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1311 {
1312 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1313 ieee80211_tx_result res = TX_DROP;
1314
1315 #define CALL_TXH(txh) \
1316 do { \
1317 res = txh(tx); \
1318 if (res != TX_CONTINUE) \
1319 goto txh_done; \
1320 } while (0)
1321
1322 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1323 CALL_TXH(ieee80211_tx_h_check_assoc);
1324 CALL_TXH(ieee80211_tx_h_ps_buf);
1325 CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
1326 CALL_TXH(ieee80211_tx_h_select_key);
1327 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1328 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1329
1330 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
1331 __skb_queue_tail(&tx->skbs, tx->skb);
1332 tx->skb = NULL;
1333 goto txh_done;
1334 }
1335
1336 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1337 CALL_TXH(ieee80211_tx_h_sequence);
1338 CALL_TXH(ieee80211_tx_h_fragment);
1339 /* handlers after fragment must be aware of tx info fragmentation! */
1340 CALL_TXH(ieee80211_tx_h_stats);
1341 CALL_TXH(ieee80211_tx_h_encrypt);
1342 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1343 CALL_TXH(ieee80211_tx_h_calculate_duration);
1344 #undef CALL_TXH
1345
1346 txh_done:
1347 if (unlikely(res == TX_DROP)) {
1348 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1349 if (tx->skb)
1350 ieee80211_free_txskb(&tx->local->hw, tx->skb);
1351 else
1352 ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
1353 return -1;
1354 } else if (unlikely(res == TX_QUEUED)) {
1355 I802_DEBUG_INC(tx->local->tx_handlers_queued);
1356 return -1;
1357 }
1358
1359 return 0;
1360 }
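/*
 * Illustrative sketch (editor's addition, not part of mac80211): each
 * CALL_TXH() invocation above expands (apart from its do { } while (0)
 * wrapper) to the same three lines, e.g. CALL_TXH(ieee80211_tx_h_sequence)
 * becomes
 *
 *	res = ieee80211_tx_h_sequence(tx);
 *	if (res != TX_CONTINUE)
 *		goto txh_done;
 *
 * i.e. every handler returns TX_CONTINUE to pass the frame on, TX_DROP to
 * have it freed and counted, or TX_QUEUED once it has taken ownership of
 * the skb(s).
 */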
1361
1362 /*
1363 * Returns false if the frame couldn't be transmitted but was queued instead.
1364 */
1365 static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1366 struct sk_buff *skb, bool txpending,
1367 enum ieee80211_band band)
1368 {
1369 struct ieee80211_local *local = sdata->local;
1370 struct ieee80211_tx_data tx;
1371 ieee80211_tx_result res_prepare;
1372 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1373 bool result = true;
1374 int led_len;
1375
1376 if (unlikely(skb->len < 10)) {
1377 dev_kfree_skb(skb);
1378 return true;
1379 }
1380
1381 /* initialises tx */
1382 led_len = skb->len;
1383 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
1384
1385 if (unlikely(res_prepare == TX_DROP)) {
1386 ieee80211_free_txskb(&local->hw, skb);
1387 return true;
1388 } else if (unlikely(res_prepare == TX_QUEUED)) {
1389 return true;
1390 }
1391
1392 info->band = band;
1393
1394 /* set up hw_queue value early */
1395 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
1396 !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
1397 info->hw_queue =
1398 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
1399
1400 if (!invoke_tx_handlers(&tx))
1401 result = __ieee80211_tx(local, &tx.skbs, led_len,
1402 tx.sta, txpending);
1403
1404 return result;
1405 }
1406
1407 /* device xmit handlers */
1408
1409 static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1410 struct sk_buff *skb,
1411 int head_need, bool may_encrypt)
1412 {
1413 struct ieee80211_local *local = sdata->local;
1414 int tail_need = 0;
1415
1416 if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
1417 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1418 tail_need -= skb_tailroom(skb);
1419 tail_need = max_t(int, tail_need, 0);
1420 }
1421
1422 if (skb_cloned(skb))
1423 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1424 else if (head_need || tail_need)
1425 I802_DEBUG_INC(local->tx_expand_skb_head);
1426 else
1427 return 0;
1428
1429 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1430 wiphy_debug(local->hw.wiphy,
1431 "failed to reallocate TX buffer\n");
1432 return -ENOMEM;
1433 }
1434
1435 return 0;
1436 }
1437
1438 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1439 enum ieee80211_band band)
1440 {
1441 struct ieee80211_local *local = sdata->local;
1442 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1443 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1444 int headroom;
1445 bool may_encrypt;
1446
1447 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1448
1449 headroom = local->tx_headroom;
1450 if (may_encrypt)
1451 headroom += IEEE80211_ENCRYPT_HEADROOM;
1452 headroom -= skb_headroom(skb);
1453 headroom = max_t(int, 0, headroom);
1454
1455 if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
1456 ieee80211_free_txskb(&local->hw, skb);
1457 return;
1458 }
1459
1460 hdr = (struct ieee80211_hdr *) skb->data;
1461 info->control.vif = &sdata->vif;
1462
1463 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1464 if (ieee80211_is_data(hdr->frame_control) &&
1465 is_unicast_ether_addr(hdr->addr1)) {
1466 if (mesh_nexthop_resolve(sdata, skb))
1467 return; /* skb queued: don't free */
1468 } else {
1469 ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
1470 }
1471 }
1472
1473 ieee80211_set_qos_hdr(sdata, skb);
1474 ieee80211_tx(sdata, skb, false, band);
1475 }
1476
1477 static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
1478 {
1479 struct ieee80211_radiotap_iterator iterator;
1480 struct ieee80211_radiotap_header *rthdr =
1481 (struct ieee80211_radiotap_header *) skb->data;
1482 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1483 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1484 NULL);
1485 u16 txflags;
1486
1487 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
1488 IEEE80211_TX_CTL_DONTFRAG;
1489
1490 /*
1491 * for every radiotap entry that is present
1492 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1493 * entries present, or -EINVAL on error)
1494 */
1495
1496 while (!ret) {
1497 ret = ieee80211_radiotap_iterator_next(&iterator);
1498
1499 if (ret)
1500 continue;
1501
1502 /* see if this argument is something we can use */
1503 switch (iterator.this_arg_index) {
1504 /*
1505 * You must take care when dereferencing iterator.this_arg
1506 * for multibyte types... the pointer is not aligned. Use
1507 * get_unaligned((type *)iterator.this_arg) to dereference
1508 * iterator.this_arg for type "type" safely on all arches.
1509 */
1510 case IEEE80211_RADIOTAP_FLAGS:
1511 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1512 /*
1513 * this indicates that the skb we have been
1514 * handed has the 32-bit FCS CRC at the end...
1515 * we should react to that by snipping it off
1516 * because it will be recomputed and added
1517 * on transmission
1518 */
1519 if (skb->len < (iterator._max_length + FCS_LEN))
1520 return false;
1521
1522 skb_trim(skb, skb->len - FCS_LEN);
1523 }
1524 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
1525 info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
1526 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
1527 info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
1528 break;
1529
1530 case IEEE80211_RADIOTAP_TX_FLAGS:
1531 txflags = get_unaligned_le16(iterator.this_arg);
1532 if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
1533 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1534 break;
1535
1536 /*
1537 * Please update the file
1538 * Documentation/networking/mac80211-injection.txt
1539 * when parsing new fields here.
1540 */
1541
1542 default:
1543 break;
1544 }
1545 }
1546
1547 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1548 return false;
1549
1550 /*
1551 * remove the radiotap header
1552 * iterator->_max_length was sanity-checked against
1553 * skb->len by iterator init
1554 */
1555 skb_pull(skb, iterator._max_length);
1556
1557 return true;
1558 }
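/*
 * Illustrative sketch (editor's addition, not part of mac80211): the
 * smallest header this parser accepts from an injecting application is
 * the bare 8-byte radiotap header with no fields present.  A hypothetical
 * userspace helper could build it as (radiotap is little-endian by
 * definition):
 *
 *	static void build_minimal_radiotap(unsigned char hdr[8])
 *	{
 *		hdr[0] = 0;	// it_version, must be 0
 *		hdr[1] = 0;	// it_pad
 *		hdr[2] = 8;	// it_len, little endian
 *		hdr[3] = 0;
 *		hdr[4] = 0;	// it_present: no fields
 *		hdr[5] = 0;
 *		hdr[6] = 0;
 *		hdr[7] = 0;
 *	}
 *
 * The 802.11 header and payload follow immediately after these 8 bytes;
 * ieee80211_parse_tx_radiotap() strips the radiotap part again via
 * skb_pull() once the fields have been parsed.
 */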
1559
1560 netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1561 struct net_device *dev)
1562 {
1563 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1564 struct ieee80211_chanctx_conf *chanctx_conf;
1565 struct ieee80211_channel *chan;
1566 struct ieee80211_radiotap_header *prthdr =
1567 (struct ieee80211_radiotap_header *)skb->data;
1568 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1569 struct ieee80211_hdr *hdr;
1570 struct ieee80211_sub_if_data *tmp_sdata, *sdata;
1571 u16 len_rthdr;
1572 int hdrlen;
1573
1574 /* check for not even having the fixed radiotap header part */
1575 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1576 goto fail; /* too short to be possibly valid */
1577
1578 /* is it a header version we can trust to find length from? */
1579 if (unlikely(prthdr->it_version))
1580 goto fail; /* only version 0 is supported */
1581
1582 /* then there must be a radiotap header with a length we can use */
1583 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1584
1585 /* does the skb contain enough to deliver on the alleged length? */
1586 if (unlikely(skb->len < len_rthdr))
1587 goto fail; /* skb too short for claimed rt header extent */
1588
1589 /*
1590 * fix up the pointers accounting for the radiotap
1591 * header still being in there. We are being given
1592 * a precooked IEEE80211 header so no need for
1593 * normal processing
1594 */
1595 skb_set_mac_header(skb, len_rthdr);
1596 /*
1597 * these are just fixed to the end of the rt area since we
1598 * don't have any better information and at this point, nobody cares
1599 */
1600 skb_set_network_header(skb, len_rthdr);
1601 skb_set_transport_header(skb, len_rthdr);
1602
1603 if (skb->len < len_rthdr + 2)
1604 goto fail;
1605
1606 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
1607 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1608
1609 if (skb->len < len_rthdr + hdrlen)
1610 goto fail;
1611
1612 /*
1613 * Initialize skb->protocol if the injected frame is a data frame
 1614 	 * carrying an rfc1042 header
1615 */
1616 if (ieee80211_is_data(hdr->frame_control) &&
1617 skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
1618 u8 *payload = (u8 *)hdr + hdrlen;
1619
1620 if (ether_addr_equal(payload, rfc1042_header))
1621 skb->protocol = cpu_to_be16((payload[6] << 8) |
1622 payload[7]);
1623 }
1624
1625 memset(info, 0, sizeof(*info));
1626
1627 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
1628 IEEE80211_TX_CTL_INJECTED;
1629
1630 /* process and remove the injection radiotap header */
1631 if (!ieee80211_parse_tx_radiotap(skb))
1632 goto fail;
1633
1634 rcu_read_lock();
1635
1636 /*
 1637 	 * We process outgoing injected frames that have a local address
 1638 	 * as though they were non-injected frames.
 1639 	 * This code isn't entirely correct: the local MAC address
1640 * isn't always enough to find the interface to use; for proper
1641 * VLAN/WDS support we will need a different mechanism (which
1642 * likely isn't going to be monitor interfaces).
1643 */
1644 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1645
1646 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
1647 if (!ieee80211_sdata_running(tmp_sdata))
1648 continue;
1649 if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1650 tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1651 tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
1652 continue;
1653 if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
1654 sdata = tmp_sdata;
1655 break;
1656 }
1657 }
1658
1659 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1660 if (!chanctx_conf) {
1661 tmp_sdata = rcu_dereference(local->monitor_sdata);
1662 if (tmp_sdata)
1663 chanctx_conf =
1664 rcu_dereference(tmp_sdata->vif.chanctx_conf);
1665 }
1666
1667 if (chanctx_conf)
1668 chan = chanctx_conf->def.chan;
1669 else if (!local->use_chanctx)
1670 chan = local->_oper_chandef.chan;
1671 else
1672 goto fail_rcu;
1673
1674 /*
1675 * Frame injection is not allowed if beaconing is not allowed
1676 * or if we need radar detection. Beaconing is usually not allowed when
1677 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1678 * Passive scan is also used in world regulatory domains where
1679 * your country is not known and as such it should be treated as
1680 * NO TX unless the channel is explicitly allowed in which case
1681 * your current regulatory domain would not have the passive scan
1682 * flag.
1683 *
1684 * Since AP mode uses monitor interfaces to inject/TX management
1685 * frames we can make AP mode the exception to this rule once it
1686 * supports radar detection as its implementation can deal with
1687 * radar detection by itself. We can do that later by adding a
 1688 	 * monitor flag to interfaces used for AP support.
1689 */
1690 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1691 IEEE80211_CHAN_PASSIVE_SCAN)))
1692 goto fail_rcu;
1693
1694 ieee80211_xmit(sdata, skb, chan->band);
1695 rcu_read_unlock();
1696
1697 return NETDEV_TX_OK;
1698
1699 fail_rcu:
1700 rcu_read_unlock();
1701 fail:
1702 dev_kfree_skb(skb);
1703 return NETDEV_TX_OK; /* meaning, we dealt with the skb */
1704 }
1705
1706 /**
1707 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
1708 * subinterfaces (wlan#, WDS, and VLAN interfaces)
1709 * @skb: packet to be sent
1710 * @dev: incoming interface
1711 *
1712 * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will
1713 * not be freed, and caller is responsible for either retrying later or freeing
1714 * skb).
1715 *
1716 * This function takes in an Ethernet header and encapsulates it with suitable
1717 * IEEE 802.11 header based on which interface the packet is coming in. The
1718 * encapsulated packet will then be passed to master interface, wlan#.11, for
1719 * transmission (through low-level driver).
1720 */
1721 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1722 struct net_device *dev)
1723 {
1724 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1725 struct ieee80211_local *local = sdata->local;
1726 struct ieee80211_tx_info *info;
1727 int head_need;
1728 u16 ethertype, hdrlen, meshhdrlen = 0;
1729 __le16 fc;
1730 struct ieee80211_hdr hdr;
1731 struct ieee80211s_hdr mesh_hdr __maybe_unused;
1732 struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
1733 const u8 *encaps_data;
1734 int encaps_len, skip_header_bytes;
1735 int nh_pos, h_pos;
1736 struct sta_info *sta = NULL;
1737 bool wme_sta = false, authorized = false, tdls_auth = false;
1738 bool tdls_direct = false;
1739 bool multicast;
1740 u32 info_flags = 0;
1741 u16 info_id = 0;
1742 struct ieee80211_chanctx_conf *chanctx_conf;
1743 struct ieee80211_sub_if_data *ap_sdata;
1744 enum ieee80211_band band;
1745
1746 if (unlikely(skb->len < ETH_HLEN))
1747 goto fail;
1748
1749 /* convert Ethernet header to proper 802.11 header (based on
1750 * operation mode) */
1751 ethertype = (skb->data[12] << 8) | skb->data[13];
1752 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1753
1754 rcu_read_lock();
1755
1756 switch (sdata->vif.type) {
1757 case NL80211_IFTYPE_AP_VLAN:
1758 sta = rcu_dereference(sdata->u.vlan.sta);
1759 if (sta) {
1760 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1761 /* RA TA DA SA */
1762 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1763 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1764 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1765 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1766 hdrlen = 30;
1767 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1768 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1769 }
1770 ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1771 u.ap);
1772 chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
1773 if (!chanctx_conf)
1774 goto fail_rcu;
1775 band = chanctx_conf->def.chan->band;
1776 if (sta)
1777 break;
1778 /* fall through */
1779 case NL80211_IFTYPE_AP:
1780 if (sdata->vif.type == NL80211_IFTYPE_AP)
1781 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1782 if (!chanctx_conf)
1783 goto fail_rcu;
1784 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1785 /* DA BSSID SA */
1786 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1787 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1788 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1789 hdrlen = 24;
1790 band = chanctx_conf->def.chan->band;
1791 break;
1792 case NL80211_IFTYPE_WDS:
1793 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1794 /* RA TA DA SA */
1795 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1796 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1797 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1798 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1799 hdrlen = 30;
1800 /*
1801 * This is the exception! WDS style interfaces are prohibited
 1802 		 * when channel contexts are in use, so this must be valid
1803 */
1804 band = local->hw.conf.chandef.chan->band;
1805 break;
1806 #ifdef CONFIG_MAC80211_MESH
1807 case NL80211_IFTYPE_MESH_POINT:
1808 if (!is_multicast_ether_addr(skb->data)) {
1809 struct sta_info *next_hop;
1810 bool mpp_lookup = true;
1811
1812 mpath = mesh_path_lookup(sdata, skb->data);
1813 if (mpath) {
1814 mpp_lookup = false;
1815 next_hop = rcu_dereference(mpath->next_hop);
1816 if (!next_hop ||
1817 !(mpath->flags & (MESH_PATH_ACTIVE |
1818 MESH_PATH_RESOLVING)))
1819 mpp_lookup = true;
1820 }
1821
1822 if (mpp_lookup)
1823 mppath = mpp_path_lookup(sdata, skb->data);
1824
1825 if (mppath && mpath)
1826 mesh_path_del(mpath->sdata, mpath->dst);
1827 }
1828
1829 /*
1830 * Use address extension if it is a packet from
1831 * another interface or if we know the destination
1832 * is being proxied by a portal (i.e. portal address
1833 * differs from proxied address)
1834 */
1835 if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
1836 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
1837 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1838 skb->data, skb->data + ETH_ALEN);
1839 meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
1840 NULL, NULL);
1841 } else {
1842 /* DS -> MBSS (802.11-2012 13.11.3.3).
1843 * For unicast with unknown forwarding information,
1844 * destination might be in the MBSS or if that fails
1845 * forwarded to another mesh gate. In either case
1846 * resolution will be handled in ieee80211_xmit(), so
1847 * leave the original DA. This also works for mcast */
1848 const u8 *mesh_da = skb->data;
1849
1850 if (mppath)
1851 mesh_da = mppath->mpp;
1852 else if (mpath)
1853 mesh_da = mpath->dst;
1854
1855 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1856 mesh_da, sdata->vif.addr);
1857 if (is_multicast_ether_addr(mesh_da))
1858 /* DA TA mSA AE:SA */
1859 meshhdrlen = ieee80211_new_mesh_header(
1860 sdata, &mesh_hdr,
1861 skb->data + ETH_ALEN, NULL);
1862 else
1863 /* RA TA mDA mSA AE:DA SA */
1864 meshhdrlen = ieee80211_new_mesh_header(
1865 sdata, &mesh_hdr, skb->data,
1866 skb->data + ETH_ALEN);
1867
1868 }
1869 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1870 if (!chanctx_conf)
1871 goto fail_rcu;
1872 band = chanctx_conf->def.chan->band;
1873 break;
1874 #endif
1875 case NL80211_IFTYPE_STATION:
1876 if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1877 bool tdls_peer = false;
1878
1879 sta = sta_info_get(sdata, skb->data);
1880 if (sta) {
1881 authorized = test_sta_flag(sta,
1882 WLAN_STA_AUTHORIZED);
1883 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1884 tdls_peer = test_sta_flag(sta,
1885 WLAN_STA_TDLS_PEER);
1886 tdls_auth = test_sta_flag(sta,
1887 WLAN_STA_TDLS_PEER_AUTH);
1888 }
1889
1890 /*
1891 * If the TDLS link is enabled, send everything
1892 * directly. Otherwise, allow TDLS setup frames
1893 * to be transmitted indirectly.
1894 */
1895 tdls_direct = tdls_peer && (tdls_auth ||
1896 !(ethertype == ETH_P_TDLS && skb->len > 14 &&
1897 skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
1898 }
1899
1900 if (tdls_direct) {
1901 /* link during setup - throw out frames to peer */
1902 if (!tdls_auth)
1903 goto fail_rcu;
1904
1905 /* DA SA BSSID */
1906 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1907 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1908 memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
1909 hdrlen = 24;
1910 } else if (sdata->u.mgd.use_4addr &&
1911 cpu_to_be16(ethertype) != sdata->control_port_protocol) {
1912 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
1913 IEEE80211_FCTL_TODS);
1914 /* RA TA DA SA */
1915 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1916 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1917 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1918 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1919 hdrlen = 30;
1920 } else {
1921 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1922 /* BSSID SA DA */
1923 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1924 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1925 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1926 hdrlen = 24;
1927 }
1928 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1929 if (!chanctx_conf)
1930 goto fail_rcu;
1931 band = chanctx_conf->def.chan->band;
1932 break;
1933 case NL80211_IFTYPE_ADHOC:
1934 /* DA SA BSSID */
1935 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1936 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1937 memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
1938 hdrlen = 24;
1939 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1940 if (!chanctx_conf)
1941 goto fail_rcu;
1942 band = chanctx_conf->def.chan->band;
1943 break;
1944 default:
1945 goto fail_rcu;
1946 }
1947
1948 /*
1949 * There's no need to try to look up the destination
1950 * if it is a multicast address (which can only happen
1951 * in AP mode)
1952 */
1953 multicast = is_multicast_ether_addr(hdr.addr1);
1954 if (!multicast) {
1955 sta = sta_info_get(sdata, hdr.addr1);
1956 if (sta) {
1957 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1958 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1959 }
1960 }
1961
1962 /* For mesh, the use of the QoS header is mandatory */
1963 if (ieee80211_vif_is_mesh(&sdata->vif))
1964 wme_sta = true;
1965
1966 /* receiver and we are both QoS enabled, use a QoS data frame */
1967 if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
1968 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1969 hdrlen += 2;
1970 }
1971
1972 /*
1973 * Drop unicast frames to unauthorised stations unless they are
1974 * EAPOL frames from the local station.
1975 */
1976 if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
1977 !is_multicast_ether_addr(hdr.addr1) && !authorized &&
1978 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1979 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1980 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1981 net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
1982 dev->name, hdr.addr1);
1983 #endif
1984
1985 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1986
1987 goto fail_rcu;
1988 }
1989
1990 if (unlikely(!multicast && skb->sk &&
1991 skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
1992 struct sk_buff *orig_skb = skb;
1993
1994 skb = skb_clone(skb, GFP_ATOMIC);
1995 if (skb) {
1996 unsigned long flags;
1997 int id;
1998
1999 spin_lock_irqsave(&local->ack_status_lock, flags);
2000 id = idr_alloc(&local->ack_status_frames, orig_skb,
2001 1, 0x10000, GFP_ATOMIC);
2002 spin_unlock_irqrestore(&local->ack_status_lock, flags);
2003
2004 if (id >= 0) {
2005 info_id = id;
2006 info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
2007 } else if (skb_shared(skb)) {
2008 kfree_skb(orig_skb);
2009 } else {
2010 kfree_skb(skb);
2011 skb = orig_skb;
2012 }
2013 } else {
2014 /* couldn't clone -- lose tx status ... */
2015 skb = orig_skb;
2016 }
2017 }
2018
2019 /*
2020 * If the skb is shared we need to obtain our own copy.
2021 */
2022 if (skb_shared(skb)) {
2023 struct sk_buff *tmp_skb = skb;
2024
2025 /* can't happen -- skb is a clone if info_id != 0 */
2026 WARN_ON(info_id);
2027
2028 skb = skb_clone(skb, GFP_ATOMIC);
2029 kfree_skb(tmp_skb);
2030
2031 if (!skb)
2032 goto fail_rcu;
2033 }
2034
2035 hdr.frame_control = fc;
2036 hdr.duration_id = 0;
2037 hdr.seq_ctrl = 0;
2038
2039 skip_header_bytes = ETH_HLEN;
2040 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
2041 encaps_data = bridge_tunnel_header;
2042 encaps_len = sizeof(bridge_tunnel_header);
2043 skip_header_bytes -= 2;
2044 } else if (ethertype >= ETH_P_802_3_MIN) {
2045 encaps_data = rfc1042_header;
2046 encaps_len = sizeof(rfc1042_header);
2047 skip_header_bytes -= 2;
2048 } else {
2049 encaps_data = NULL;
2050 encaps_len = 0;
2051 }
2052
2053 nh_pos = skb_network_header(skb) - skb->data;
2054 h_pos = skb_transport_header(skb) - skb->data;
2055
2056 skb_pull(skb, skip_header_bytes);
2057 nh_pos -= skip_header_bytes;
2058 h_pos -= skip_header_bytes;
2059
2060 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
2061
2062 /*
2063 * We need to modify the 802.11 header we are about to prepend, so we
2064 * need a writable skb with enough headroom. head_need so far only
2065 * counts the space required right away (802.11, SNAP and mesh
2066 * headers); if that fits into the existing headroom and the skb is
2067 * not cloned, we avoid reallocating here.
2068 *
2069 * If we do have to reallocate, make the skb big enough for everything
2070 * we may ever need, i.e. also the encryption headroom and the
2071 * driver's tx headroom, so it need not be expanded again later.
2072 */
2073
2074 if (head_need > 0 || skb_cloned(skb)) {
2075 head_need += IEEE80211_ENCRYPT_HEADROOM;
2076 head_need += local->tx_headroom;
2077 head_need = max_t(int, 0, head_need);
2078 if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
2079 ieee80211_free_txskb(&local->hw, skb);
2080 skb = NULL;
2081 goto fail_rcu;
2082 }
2083 }
2084
2085 if (encaps_data) {
2086 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
2087 nh_pos += encaps_len;
2088 h_pos += encaps_len;
2089 }
2090
2091 #ifdef CONFIG_MAC80211_MESH
2092 if (meshhdrlen > 0) {
2093 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
2094 nh_pos += meshhdrlen;
2095 h_pos += meshhdrlen;
2096 }
2097 #endif
2098
2099 if (ieee80211_is_data_qos(fc)) {
2100 __le16 *qos_control;
2101
2102 qos_control = (__le16*) skb_push(skb, 2);
2103 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
2104 /*
2105 * Maybe we could actually set some fields here, for now just
2106 * initialise to zero to indicate no special operation.
2107 */
2108 *qos_control = 0;
2109 } else
2110 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
2111
2112 nh_pos += hdrlen;
2113 h_pos += hdrlen;
2114
2115 dev->stats.tx_packets++;
2116 dev->stats.tx_bytes += skb->len;
2117
2118 /* Update skb pointers to the various headers since this modified
2119 * frame is going to go through Linux networking code that may
2120 * potentially need things like a pointer to the IP header. */
2121 skb_set_mac_header(skb, 0);
2122 skb_set_network_header(skb, nh_pos);
2123 skb_set_transport_header(skb, h_pos);
2124
2125 info = IEEE80211_SKB_CB(skb);
2126 memset(info, 0, sizeof(*info));
2127
2128 dev->trans_start = jiffies;
2129
2130 info->flags = info_flags;
2131 info->ack_frame_id = info_id;
2132
2133 ieee80211_xmit(sdata, skb, band);
2134 rcu_read_unlock();
2135
2136 return NETDEV_TX_OK;
2137
2138 fail_rcu:
2139 rcu_read_unlock();
2140 fail:
2141 dev_kfree_skb(skb);
2142 return NETDEV_TX_OK;
2143 }
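
/*
 * Illustrative sketch only, not used by mac80211 itself: the encapsulation
 * choice made in ieee80211_subif_start_xmit() above, factored out for
 * clarity.  AppleTalk AARP and IPX keep the bridge-tunnel SNAP header,
 * every other real ethertype (>= ETH_P_802_3_MIN) gets the plain RFC 1042
 * SNAP header, and 802.3 length-style frames get no encapsulation at all.
 * The example_* name is hypothetical.
 */
static __maybe_unused const u8 *example_encaps_header(u16 ethertype,
						      int *encaps_len)
{
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		*encaps_len = sizeof(bridge_tunnel_header);
		return bridge_tunnel_header;
	}
	if (ethertype >= ETH_P_802_3_MIN) {
		*encaps_len = sizeof(rfc1042_header);
		return rfc1042_header;
	}
	*encaps_len = 0;
	return NULL;
}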
2144
2145
2146 /*
2147 * ieee80211_clear_tx_pending may not be called in a context where
2148 * it is possible that packets could come in again.
2149 */
2150 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
2151 {
2152 struct sk_buff *skb;
2153 int i;
2154
2155 for (i = 0; i < local->hw.queues; i++) {
2156 while ((skb = skb_dequeue(&local->pending[i])) != NULL)
2157 ieee80211_free_txskb(&local->hw, skb);
2158 }
2159 }
2160
2161 /*
2162 * Returns false if the frame couldn't be transmitted and was re-queued
2163 * instead -- take that as an indication to stop sending more pending
2164 * frames.
2165 */
2166 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2167 struct sk_buff *skb)
2168 {
2169 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2170 struct ieee80211_sub_if_data *sdata;
2171 struct sta_info *sta;
2172 struct ieee80211_hdr *hdr;
2173 bool result;
2174 struct ieee80211_chanctx_conf *chanctx_conf;
2175
2176 sdata = vif_to_sdata(info->control.vif);
2177
2178 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2179 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2180 if (unlikely(!chanctx_conf)) {
2181 dev_kfree_skb(skb);
2182 return true;
2183 }
2184 result = ieee80211_tx(sdata, skb, true,
2185 chanctx_conf->def.chan->band);
2186 } else {
2187 struct sk_buff_head skbs;
2188
2189 __skb_queue_head_init(&skbs);
2190 __skb_queue_tail(&skbs, skb);
2191
2192 hdr = (struct ieee80211_hdr *)skb->data;
2193 sta = sta_info_get(sdata, hdr->addr1);
2194
2195 result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
2196 }
2197
2198 return result;
2199 }
2200
2201 /*
2202 * Transmit all pending packets. Called from tasklet.
2203 */
2204 void ieee80211_tx_pending(unsigned long data)
2205 {
2206 struct ieee80211_local *local = (struct ieee80211_local *)data;
2207 unsigned long flags;
2208 int i;
2209 bool txok;
2210
2211 rcu_read_lock();
2212
2213 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
2214 for (i = 0; i < local->hw.queues; i++) {
2215 /*
2216 * If the queue is stopped for a reason other than pending
2217 * frames, or we have no pending frames, proceed to the next queue.
2218 */
2219 if (local->queue_stop_reasons[i] ||
2220 skb_queue_empty(&local->pending[i]))
2221 continue;
2222
2223 while (!skb_queue_empty(&local->pending[i])) {
2224 struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
2225 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2226
2227 if (WARN_ON(!info->control.vif)) {
2228 ieee80211_free_txskb(&local->hw, skb);
2229 continue;
2230 }
2231
2232 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
2233 flags);
2234
2235 txok = ieee80211_tx_pending_skb(local, skb);
2236 spin_lock_irqsave(&local->queue_stop_reason_lock,
2237 flags);
2238 if (!txok)
2239 break;
2240 }
2241
2242 if (skb_queue_empty(&local->pending[i]))
2243 ieee80211_propagate_queue_wake(local, i);
2244 }
2245 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2246
2247 rcu_read_unlock();
2248 }
2249
2250 /* functions for drivers to get certain frames */
2251
2252 static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2253 struct ps_data *ps, struct sk_buff *skb)
2254 {
2255 u8 *pos, *tim;
2256 int aid0 = 0;
2257 int i, have_bits = 0, n1, n2;
2258
2259 /* Generate bitmap for TIM only if there are any STAs in power save
2260 * mode. */
2261 if (atomic_read(&ps->num_sta_ps) > 0)
2262 /* in the hope that this is faster than
2263 * checking byte-for-byte */
2264 have_bits = !bitmap_empty((unsigned long*)ps->tim,
2265 IEEE80211_MAX_AID+1);
2266
2267 if (ps->dtim_count == 0)
2268 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2269 else
2270 ps->dtim_count--;
2271
2272 tim = pos = (u8 *) skb_put(skb, 6);
2273 *pos++ = WLAN_EID_TIM;
2274 *pos++ = 4;
2275 *pos++ = ps->dtim_count;
2276 *pos++ = sdata->vif.bss_conf.dtim_period;
2277
2278 if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
2279 aid0 = 1;
2280
2281 ps->dtim_bc_mc = aid0 == 1;
2282
2283 if (have_bits) {
2284 /* Find largest even number N1 so that bits numbered 1 through
2285 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
2286 * (N2 + 1) x 8 through 2007 are 0. */
2287 n1 = 0;
2288 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
2289 if (ps->tim[i]) {
2290 n1 = i & 0xfe;
2291 break;
2292 }
2293 }
2294 n2 = n1;
2295 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
2296 if (ps->tim[i]) {
2297 n2 = i;
2298 break;
2299 }
2300 }
2301
2302 /* Bitmap control */
2303 *pos++ = n1 | aid0;
2304 /* Part Virt Bitmap */
2305 skb_put(skb, n2 - n1);
2306 memcpy(pos, ps->tim + n1, n2 - n1 + 1);
2307
2308 tim[1] = n2 - n1 + 4;
2309 } else {
2310 *pos++ = aid0; /* Bitmap control */
2311 *pos++ = 0; /* Part Virt Bitmap */
2312 }
2313 }
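
/*
 * Worked example for the partial virtual bitmap built above (illustrative
 * numbers only): suppose stations with AID 9 and AID 21 are asleep with
 * buffered traffic, so ps->tim has bits set in octets 1 (AIDs 8-15) and
 * 2 (AIDs 16-23).  The scans then give n1 = 1 & 0xfe = 0 and n2 = 2, the
 * element carries octets 0..2 of the bitmap, the Bitmap Control field is
 * n1 | aid0 (0 when no bc/mc traffic is buffered), and the element length
 * becomes n2 - n1 + 4 = 6.
 */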
2314
2315 static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2316 struct ps_data *ps, struct sk_buff *skb)
2317 {
2318 struct ieee80211_local *local = sdata->local;
2319
2320 /*
2321 * Not very nice, but we want to allow the driver to call
2322 * ieee80211_beacon_get() as a response to the set_tim()
2323 * callback. That, however, is already invoked under the
2324 * sta_lock to guarantee consistent and race-free update
2325 * of the tim bitmap in mac80211 and the driver.
2326 */
2327 if (local->tim_in_locked_section) {
2328 __ieee80211_beacon_add_tim(sdata, ps, skb);
2329 } else {
2330 spin_lock_bh(&local->tim_lock);
2331 __ieee80211_beacon_add_tim(sdata, ps, skb);
2332 spin_unlock_bh(&local->tim_lock);
2333 }
2334
2335 return 0;
2336 }
2337
2338 struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2339 struct ieee80211_vif *vif,
2340 u16 *tim_offset, u16 *tim_length)
2341 {
2342 struct ieee80211_local *local = hw_to_local(hw);
2343 struct sk_buff *skb = NULL;
2344 struct ieee80211_tx_info *info;
2345 struct ieee80211_sub_if_data *sdata = NULL;
2346 enum ieee80211_band band;
2347 struct ieee80211_tx_rate_control txrc;
2348 struct ieee80211_chanctx_conf *chanctx_conf;
2349
2350 rcu_read_lock();
2351
2352 sdata = vif_to_sdata(vif);
2353 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2354
2355 if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
2356 goto out;
2357
2358 if (tim_offset)
2359 *tim_offset = 0;
2360 if (tim_length)
2361 *tim_length = 0;
2362
2363 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2364 struct ieee80211_if_ap *ap = &sdata->u.ap;
2365 struct beacon_data *beacon = rcu_dereference(ap->beacon);
2366
2367 if (beacon) {
2368 /*
2369 * headroom, head length,
2370 * tail length and maximum TIM length
2371 */
2372 skb = dev_alloc_skb(local->tx_headroom +
2373 beacon->head_len +
2374 beacon->tail_len + 256);
2375 if (!skb)
2376 goto out;
2377
2378 skb_reserve(skb, local->tx_headroom);
2379 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2380 beacon->head_len);
2381
2382 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2383
2384 if (tim_offset)
2385 *tim_offset = beacon->head_len;
2386 if (tim_length)
2387 *tim_length = skb->len - beacon->head_len;
2388
2389 if (beacon->tail)
2390 memcpy(skb_put(skb, beacon->tail_len),
2391 beacon->tail, beacon->tail_len);
2392 } else
2393 goto out;
2394 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2395 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2396 struct ieee80211_hdr *hdr;
2397 struct beacon_data *presp = rcu_dereference(ifibss->presp);
2398
2399 if (!presp)
2400 goto out;
2401
2402 skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
2403 if (!skb)
2404 goto out;
2405 skb_reserve(skb, local->tx_headroom);
2406 memcpy(skb_put(skb, presp->head_len), presp->head,
2407 presp->head_len);
2408
2409 hdr = (struct ieee80211_hdr *) skb->data;
2410 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2411 IEEE80211_STYPE_BEACON);
2412 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2413 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2414 struct beacon_data *bcn = rcu_dereference(ifmsh->beacon);
2415
2416 if (!bcn)
2417 goto out;
2418
2419 if (ifmsh->sync_ops)
2420 ifmsh->sync_ops->adjust_tbtt(
2421 sdata);
2422
2423 skb = dev_alloc_skb(local->tx_headroom +
2424 bcn->head_len +
2425 256 + /* TIM IE */
2426 bcn->tail_len);
2427 if (!skb)
2428 goto out;
2429 skb_reserve(skb, local->tx_headroom);
2430 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
2431 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb);
2432 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
2433 } else {
2434 WARN_ON(1);
2435 goto out;
2436 }
2437
2438 band = chanctx_conf->def.chan->band;
2439
2440 info = IEEE80211_SKB_CB(skb);
2441
2442 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2443 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2444 info->band = band;
2445
2446 memset(&txrc, 0, sizeof(txrc));
2447 txrc.hw = hw;
2448 txrc.sband = local->hw.wiphy->bands[band];
2449 txrc.bss_conf = &sdata->vif.bss_conf;
2450 txrc.skb = skb;
2451 txrc.reported_rate.idx = -1;
2452 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2453 if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
2454 txrc.max_rate_idx = -1;
2455 else
2456 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2457 txrc.bss = true;
2458 rate_control_get_rate(sdata, NULL, &txrc);
2459
2460 info->control.vif = vif;
2461
2462 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
2463 IEEE80211_TX_CTL_ASSIGN_SEQ |
2464 IEEE80211_TX_CTL_FIRST_FRAGMENT;
2465 out:
2466 rcu_read_unlock();
2467 return skb;
2468 }
2469 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
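
/*
 * Hedged usage sketch (the example_* name is hypothetical, everything else
 * is the exported API above): a beaconing driver typically refreshes its
 * beacon template around every TBTT, either with ieee80211_beacon_get()
 * (which passes NULL offsets) or, as here, asking for the TIM location so
 * it can patch the TIM itself later.
 */
static void __maybe_unused example_refresh_beacon(struct ieee80211_hw *hw,
						  struct ieee80211_vif *vif)
{
	struct sk_buff *beacon;
	u16 tim_offset, tim_length;

	beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, &tim_length);
	if (!beacon)
		return;	/* interface not running or no beacon configured */

	/* ... program beacon->data / beacon->len into the hardware ... */

	dev_kfree_skb_any(beacon);
}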
2470
2471 struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2472 struct ieee80211_vif *vif)
2473 {
2474 struct ieee80211_if_ap *ap = NULL;
2475 struct sk_buff *skb = NULL;
2476 struct probe_resp *presp = NULL;
2477 struct ieee80211_hdr *hdr;
2478 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2479
2480 if (sdata->vif.type != NL80211_IFTYPE_AP)
2481 return NULL;
2482
2483 rcu_read_lock();
2484
2485 ap = &sdata->u.ap;
2486 presp = rcu_dereference(ap->probe_resp);
2487 if (!presp)
2488 goto out;
2489
2490 skb = dev_alloc_skb(presp->len);
2491 if (!skb)
2492 goto out;
2493
2494 memcpy(skb_put(skb, presp->len), presp->data, presp->len);
2495
2496 hdr = (struct ieee80211_hdr *) skb->data;
2497 memset(hdr->addr1, 0, sizeof(hdr->addr1));
2498
2499 out:
2500 rcu_read_unlock();
2501 return skb;
2502 }
2503 EXPORT_SYMBOL(ieee80211_proberesp_get);
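
/*
 * Hedged usage sketch (example_* is hypothetical): a driver that offloads
 * probe responses to firmware can pull the current template like this
 * whenever it learns that the AP configuration changed.
 */
static void __maybe_unused example_update_probe_resp(struct ieee80211_hw *hw,
						     struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_proberesp_get(hw, vif);

	if (!skb)
		return;	/* not an AP interface or no template set */

	/* ... upload skb->data / skb->len to the device ... */

	dev_kfree_skb_any(skb);
}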
2504
2505 struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2506 struct ieee80211_vif *vif)
2507 {
2508 struct ieee80211_sub_if_data *sdata;
2509 struct ieee80211_if_managed *ifmgd;
2510 struct ieee80211_pspoll *pspoll;
2511 struct ieee80211_local *local;
2512 struct sk_buff *skb;
2513
2514 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2515 return NULL;
2516
2517 sdata = vif_to_sdata(vif);
2518 ifmgd = &sdata->u.mgd;
2519 local = sdata->local;
2520
2521 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2522 if (!skb)
2523 return NULL;
2524
2525 skb_reserve(skb, local->hw.extra_tx_headroom);
2526
2527 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2528 memset(pspoll, 0, sizeof(*pspoll));
2529 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2530 IEEE80211_STYPE_PSPOLL);
2531 pspoll->aid = cpu_to_le16(ifmgd->aid);
2532
2533 /* aid in PS-Poll has its two MSBs each set to 1 */
2534 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2535
2536 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2537 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2538
2539 return skb;
2540 }
2541 EXPORT_SYMBOL(ieee80211_pspoll_get);
2542
2543 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2544 struct ieee80211_vif *vif)
2545 {
2546 struct ieee80211_hdr_3addr *nullfunc;
2547 struct ieee80211_sub_if_data *sdata;
2548 struct ieee80211_if_managed *ifmgd;
2549 struct ieee80211_local *local;
2550 struct sk_buff *skb;
2551
2552 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2553 return NULL;
2554
2555 sdata = vif_to_sdata(vif);
2556 ifmgd = &sdata->u.mgd;
2557 local = sdata->local;
2558
2559 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2560 if (!skb)
2561 return NULL;
2562
2563 skb_reserve(skb, local->hw.extra_tx_headroom);
2564
2565 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2566 sizeof(*nullfunc));
2567 memset(nullfunc, 0, sizeof(*nullfunc));
2568 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2569 IEEE80211_STYPE_NULLFUNC |
2570 IEEE80211_FCTL_TODS);
2571 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2572 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2573 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2574
2575 return skb;
2576 }
2577 EXPORT_SYMBOL(ieee80211_nullfunc_get);
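
/*
 * Hedged usage sketch (example_* is hypothetical): drivers that offload
 * power save handling often pre-program both template frames built above;
 * the PS-Poll and the ToDS nullfunc frame are complete as returned, the
 * driver only adds whatever headroom or trailer its hardware requires.
 */
static void __maybe_unused example_load_ps_templates(struct ieee80211_hw *hw,
						     struct ieee80211_vif *vif)
{
	struct sk_buff *pspoll = ieee80211_pspoll_get(hw, vif);
	struct sk_buff *nullfunc = ieee80211_nullfunc_get(hw, vif);

	/* ... hand both frames to the device here ... */

	if (pspoll)
		dev_kfree_skb_any(pspoll);
	if (nullfunc)
		dev_kfree_skb_any(nullfunc);
}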
2578
2579 struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2580 struct ieee80211_vif *vif,
2581 const u8 *ssid, size_t ssid_len,
2582 size_t tailroom)
2583 {
2584 struct ieee80211_sub_if_data *sdata;
2585 struct ieee80211_local *local;
2586 struct ieee80211_hdr_3addr *hdr;
2587 struct sk_buff *skb;
2588 size_t ie_ssid_len;
2589 u8 *pos;
2590
2591 sdata = vif_to_sdata(vif);
2592 local = sdata->local;
2593 ie_ssid_len = 2 + ssid_len;
2594
2595 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2596 ie_ssid_len + tailroom);
2597 if (!skb)
2598 return NULL;
2599
2600 skb_reserve(skb, local->hw.extra_tx_headroom);
2601
2602 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2603 memset(hdr, 0, sizeof(*hdr));
2604 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2605 IEEE80211_STYPE_PROBE_REQ);
2606 eth_broadcast_addr(hdr->addr1);
2607 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2608 eth_broadcast_addr(hdr->addr3);
2609
2610 pos = skb_put(skb, ie_ssid_len);
2611 *pos++ = WLAN_EID_SSID;
2612 *pos++ = ssid_len;
2613 if (ssid_len)
2614 memcpy(pos, ssid, ssid_len);
2615 pos += ssid_len;
2616
2617 return skb;
2618 }
2619 EXPORT_SYMBOL(ieee80211_probereq_get);
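
/*
 * Hedged usage sketch (example_* and the ies/ies_len parameters are
 * hypothetical): a driver implementing hardware scan might build one probe
 * request template per SSID, reserving tailroom for the extra IEs it
 * appends itself.
 */
static __maybe_unused struct sk_buff *
example_build_probe_tmpl(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 const u8 *ssid, size_t ssid_len,
			 const u8 *ies, size_t ies_len)
{
	struct sk_buff *skb;

	skb = ieee80211_probereq_get(hw, vif, ssid, ssid_len, ies_len);
	if (!skb)
		return NULL;

	if (ies_len)
		memcpy(skb_put(skb, ies_len), ies, ies_len);

	return skb;
}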
2620
2621 void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2622 const void *frame, size_t frame_len,
2623 const struct ieee80211_tx_info *frame_txctl,
2624 struct ieee80211_rts *rts)
2625 {
2626 const struct ieee80211_hdr *hdr = frame;
2627
2628 rts->frame_control =
2629 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
2630 rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
2631 frame_txctl);
2632 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
2633 memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
2634 }
2635 EXPORT_SYMBOL(ieee80211_rts_get);
2636
2637 void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2638 const void *frame, size_t frame_len,
2639 const struct ieee80211_tx_info *frame_txctl,
2640 struct ieee80211_cts *cts)
2641 {
2642 const struct ieee80211_hdr *hdr = frame;
2643
2644 cts->frame_control =
2645 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
2646 cts->duration = ieee80211_ctstoself_duration(hw, vif,
2647 frame_len, frame_txctl);
2648 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
2649 }
2650 EXPORT_SYMBOL(ieee80211_ctstoself_get);
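
/*
 * Hedged usage sketch (example_* is hypothetical): when the hardware wants
 * pre-built protection frames for a data frame it is about to send, a
 * driver can have mac80211 fill them in place using the two helpers above;
 * frame/frame_len/info describe the protected data frame.
 */
static void __maybe_unused
example_fill_protection(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			const void *frame, size_t frame_len,
			const struct ieee80211_tx_info *info,
			struct ieee80211_rts *rts, struct ieee80211_cts *cts)
{
	ieee80211_rts_get(hw, vif, frame, frame_len, info, rts);
	ieee80211_ctstoself_get(hw, vif, frame, frame_len, info, cts);
}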
2651
2652 struct sk_buff *
2653 ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2654 struct ieee80211_vif *vif)
2655 {
2656 struct ieee80211_local *local = hw_to_local(hw);
2657 struct sk_buff *skb = NULL;
2658 struct ieee80211_tx_data tx;
2659 struct ieee80211_sub_if_data *sdata;
2660 struct ps_data *ps;
2661 struct ieee80211_tx_info *info;
2662 struct ieee80211_chanctx_conf *chanctx_conf;
2663
2664 sdata = vif_to_sdata(vif);
2665
2666 rcu_read_lock();
2667 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2668
2669 if (!chanctx_conf)
2670 goto out;
2671
2672 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2673 struct beacon_data *beacon =
2674 rcu_dereference(sdata->u.ap.beacon);
2675
2676 if (!beacon || !beacon->head)
2677 goto out;
2678
2679 ps = &sdata->u.ap.ps;
2680 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2681 ps = &sdata->u.mesh.ps;
2682 } else {
2683 goto out;
2684 }
2685
2686 if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
2687 goto out; /* send buffered bc/mc only after DTIM beacon */
2688
2689 while (1) {
2690 skb = skb_dequeue(&ps->bc_buf);
2691 if (!skb)
2692 goto out;
2693 local->total_ps_buffered--;
2694
2695 if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
2696 struct ieee80211_hdr *hdr =
2697 (struct ieee80211_hdr *) skb->data;
2698 /* more buffered multicast/broadcast frames ==> set
2699 * MoreData flag in IEEE 802.11 header to inform PS
2700 * STAs */
2701 hdr->frame_control |=
2702 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2703 }
2704
2705 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2706 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
2707 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2708 break;
2709 dev_kfree_skb_any(skb);
2710 }
2711
2712 info = IEEE80211_SKB_CB(skb);
2713
2714 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2715 info->band = chanctx_conf->def.chan->band;
2716
2717 if (invoke_tx_handlers(&tx))
2718 skb = NULL;
2719 out:
2720 rcu_read_unlock();
2721
2722 return skb;
2723 }
2724 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
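
/*
 * Hedged usage sketch (example_* is hypothetical, the transmit step is only
 * indicated by a comment): after queueing a DTIM beacon, a driver drains
 * all buffered broadcast/multicast frames for the BSS in one go, since the
 * function above hands them out one at a time.
 */
static void __maybe_unused example_send_buffered_bc(struct ieee80211_hw *hw,
						    struct ieee80211_vif *vif)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) {
		/* ... a real driver would queue skb to hardware here ... */
		dev_kfree_skb_any(skb);
	}
}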
2725
2726 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
2727 struct sk_buff *skb, int tid,
2728 enum ieee80211_band band)
2729 {
2730 int ac = ieee802_1d_to_ac[tid & 7];
2731
2732 skb_set_mac_header(skb, 0);
2733 skb_set_network_header(skb, 0);
2734 skb_set_transport_header(skb, 0);
2735
2736 skb_set_queue_mapping(skb, ac);
2737 skb->priority = tid;
2738
2739 skb->dev = sdata->dev;
2740
2741 /*
2742 * The other path calling ieee80211_xmit is from the tasklet,
2743 * and while we can handle concurrent transmissions locking
2744 * requirements are that we do not come into tx with bhs on.
2745 */
2746 local_bh_disable();
2747 ieee80211_xmit(sdata, skb, band);
2748 local_bh_enable();
2749 }
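
/*
 * For reference (this mirrors the ieee802_1d_to_ac[] table used above, so
 * treat it as descriptive rather than normative): 802.1D priorities/TIDs
 * 1 and 2 map to AC_BK, 0 and 3 to AC_BE, 4 and 5 to AC_VI, and 6 and 7 to
 * AC_VO -- which is why the TID is masked with 7 before the lookup.
 */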