mac80211: allow drivers to sleep in ampdu_action
[deliverable/linux.git] / net / mac80211 / agg-tx.c
CommitLineData
b8695a8f
JB
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
cfcdbde3 9 * Copyright 2007-2010, Intel Corporation
b8695a8f
JB
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
5a0e3ad6 17#include <linux/slab.h>
b8695a8f
JB
18#include <net/mac80211.h>
19#include "ieee80211_i.h"
24487981 20#include "driver-ops.h"
b8695a8f
JB
21#include "wme.h"
22
86ab6c5a
JB
23/**
24 * DOC: TX aggregation
25 *
26 * Aggregation on the TX side requires setting the hardware flag
27 * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
28 * hardware parameter to the number of hardware AMPDU queues. If there are no
29 * hardware queues then the driver will (currently) have to do all frame
30 * buffering.
31 *
32 * When TX aggregation is started by some subsystem (usually the rate control
33 * algorithm would be appropriate) by calling the
34 * ieee80211_start_tx_ba_session() function, the driver will be notified via
35 * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
36 *
37 * In response to that, the driver is later required to call the
38 * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
39 * function, which will start the aggregation session.
40 *
41 * Similarly, when the aggregation session is stopped by
42 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
43 * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
44 * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
45 * (or ieee80211_stop_tx_ba_cb_irqsafe()).
46 */
47
b8695a8f
JB
/*
 * Build and transmit an ADDBA request action frame to @da for @tid.
 *
 * @sdata: interface to transmit from
 * @da: destination STA address
 * @tid: TID for which aggregation is being requested
 * @dialog_token: token identifying this ADDBA exchange
 * @start_seq_num: starting sequence number of the BA session
 * @agg_size: requested maximum aggregation buffer size
 * @timeout: BA session timeout value placed in the frame
 *
 * On allocation failure the error is logged and the frame is not sent.
 */
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer "
				"for addba request frame\n", sdata->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 24 bytes: 802.11 management frame header */
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID: our own address when operating as AP, the AP's when a STA */
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	/* category byte + fixed-size ADDBA request body */
	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	/* sequence number occupies bits 15:4 of the SSN field */
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}
97
/*
 * Build and transmit a Block Ack Request (BAR) control frame to @ra
 * for @tid, carrying starting sequence number @ssn.
 *
 * On allocation failure the error is logged and the BAR is not sent.
 */
void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer for "
			"bar frame\n", sdata->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	/* normal ACK policy, compressed BA bitmap, TID in bits 15:12 */
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << 12);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	/* flag the frame so the TX path sends it unencrypted */
	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	ieee80211_tx_skb(sdata, skb);
}
127
a622ab72
JB
128static void kfree_tid_tx(struct rcu_head *rcu_head)
129{
130 struct tid_ampdu_tx *tid_tx =
131 container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
132
133 kfree(tid_tx);
134}
135
67c282c0
JB
/*
 * Core TX BA session teardown for @sta/@tid, initiated by @initiator.
 *
 * Caller must hold sta->ampdu_mlme.mtx (asserted below); may sleep,
 * since drv_ampdu_action() is invoked directly here.
 *
 * Returns 0 on success (including the not-yet-started case), -ENOENT
 * if no session exists, or the driver's (unexpected) error code.
 */
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	if (!tid_tx)
		return -ENOENT;

	spin_lock_bh(&sta->lock);

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		spin_unlock_bh(&sta->lock);
		/* free only after readers are done with the pointer */
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return 0;
	}

	spin_unlock_bh(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/* remembered so the stop callback knows whether to send a delBA */
	tid_tx->stop_initiator = initiator;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}
190
b8695a8f
JB
191/*
192 * After sending add Block Ack request we activated a timer until
193 * add Block Ack response will arrive from the recipient.
194 * If this timer expires sta_addba_resp_timer_expired will be executed.
195 */
/*
 * After sending add Block Ack request we activated a timer until
 * add Block Ack response will arrive from the recipient.
 * If this timer expires sta_addba_resp_timer_expired will be executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
		struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "timer expired on tid %d but we are not "
				"(or no longer) expecting addBA response there\n",
			tid);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif

	/* no response arrived in time: request teardown of the session
	 * (ieee80211_stop_tx_ba_session only queues the stop work) */
	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
228
96f5e66e
JB
229static inline int ieee80211_ac_from_tid(int tid)
230{
231 return ieee802_1d_to_ac[tid & 7];
232}
233
a6a67db2
JB
234/*
235 * When multiple aggregation sessions on multiple stations
236 * are being created/destroyed simultaneously, we need to
237 * refcount the global queue stop caused by that in order
238 * to not get into a situation where one of the aggregation
239 * setup or teardown re-enables queues before the other is
240 * ready to handle that.
241 *
242 * These two functions take care of this issue by keeping
243 * a global "agg_queue_stop" refcount.
244 */
245static void __acquires(agg_queue)
246ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
247{
248 int queue = ieee80211_ac_from_tid(tid);
249
250 if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
251 ieee80211_stop_queue_by_reason(
252 &local->hw, queue,
253 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
254 __acquire(agg_queue);
255}
256
257static void __releases(agg_queue)
258ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
259{
260 int queue = ieee80211_ac_from_tid(tid);
261
262 if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
263 ieee80211_wake_queue_by_reason(
264 &local->hw, queue,
265 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
266 __release(agg_queue);
267}
268
/*
 * Worker step that actually starts a TX BA session: ask the driver,
 * and on success arm the addBA response timer and send the ADDBA
 * request frame.
 *
 * Caller holds sta->ampdu_mlme.mtx (asserted below). May sleep:
 * synchronize_net() and the driver's ampdu_action callback are
 * invoked directly.
 */
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	/*
	 * While we're asking the driver about the aggregation,
	 * stop the AC queue so that we don't have to worry
	 * about frames that came in while we were doing that,
	 * which would require us to put them to the AC pending
	 * afterwards which just makes the code more complex.
	 */
	ieee80211_stop_queue_agg(local, tid);

	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * make sure no packets are being processed to get
	 * valid starting sequence number
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num);
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif
		/* driver refused: drop the session struct and recover */
		spin_lock_bh(&sta->lock);
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		spin_unlock_bh(&sta->lock);

		ieee80211_wake_queue_agg(local, tid);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return;
	}

	/* we can take packets again now */
	ieee80211_wake_queue_agg(local, tid);

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request: agg_size 0x40, timeout 5000
	 * (presumably buffer size 64 / timeout in TUs — confirm vs. spec) */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     0x40, 5000);
}
332
/*
 * Request a new TX BA session with @pubsta for @tid.
 *
 * Validates hardware/interface support, allocates and initializes the
 * per-TID aggregation state, and queues the aggregation work which
 * performs the actual driver negotiation (the flow continues in
 * ieee80211_tx_ba_session_handle_start()).
 *
 * Returns 0 on success, -EINVAL for unsupported configurations,
 * -EBUSY after too many retries, -EAGAIN if a session already exists,
 * or -ENOMEM on allocation failure.
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= STA_TID_NUM) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	/*
	 * The aggregation code is not prepared to handle
	 * anything but STA/AP due to the BSSID handling.
	 * IBSS could work in the code but isn't supported
	 * by drivers or the standard.
	 */
	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP)
		return -EINVAL;

	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA sessions blocked. "
		       "Denying BA session request\n");
#endif
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation;
	 * GFP_ATOMIC because we hold sta->lock */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
					tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/* Tx timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/* finally, assign it to the array */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
428
cd8ffc80
JB
/*
 * splice packets from the STA's pending to the local pending,
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_local *local,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	int queue = ieee80211_ac_from_tid(tid);
	unsigned long flags;

	/* stop the AC queue (refcounted) so no new frames race with us */
	ieee80211_stop_queue_agg(local, tid);

	/* NOTE(review): on this WARN path the queue stays stopped until
	 * the caller's ieee80211_agg_splice_finish() rebalances it */
	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
			  " from the pending queue\n", tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}
454
a6a67db2
JB
/* Counterpart of ieee80211_agg_splice_packets(): re-enable the queue. */
static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
	ieee80211_wake_queue_agg(local, tid);
}
460
b1720231
JB
/*
 * Final step of TX BA setup: notify the driver the session is
 * operational and move frames buffered on tid_tx->pending back into
 * the normal TX path. Caller must hold sta->ampdu_mlme.mtx; the
 * driver callback is made before taking sta->lock so it may sleep.
 */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
	ieee80211_agg_splice_finish(local, tid);

	spin_unlock_bh(&sta->lock);
}
491
/*
 * Driver callback: the driver has completed IEEE80211_AMPDU_TX_START
 * for @ra/@tid. Marks the driver ready and, if the peer's addBA
 * response already arrived, switches the session to operational.
 * Takes local->sta_mtx and sta->ampdu_mlme.mtx; may sleep.
 */
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "addBA was not requested!\n");
#endif
		goto unlock;
	}

	/* duplicate callbacks from the driver are a bug */
	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	/* peer already responded: both halves ready, go operational */
	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}
b8695a8f 539
c951ad35 540void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
86ab6c5a
JB
541 const u8 *ra, u16 tid)
542{
c951ad35
JB
543 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
544 struct ieee80211_local *local = sdata->local;
86ab6c5a
JB
545 struct ieee80211_ra_tid *ra_tid;
546 struct sk_buff *skb = dev_alloc_skb(0);
547
548 if (unlikely(!skb)) {
549#ifdef CONFIG_MAC80211_HT_DEBUG
550 if (net_ratelimit())
551 printk(KERN_WARNING "%s: Not enough memory, "
47846c9b 552 "dropping start BA session", sdata->name);
86ab6c5a
JB
553#endif
554 return;
555 }
556 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
557 memcpy(&ra_tid->ra, ra, ETH_ALEN);
558 ra_tid->tid = tid;
559
c1475ca9
JB
560 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
561 skb_queue_tail(&sdata->skb_queue, skb);
562 ieee80211_queue_work(&local->hw, &sdata->work);
86ab6c5a
JB
563}
564EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
565
849b7967
JB
566int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
567 enum ieee80211_back_parties initiator)
568{
849b7967
JB
569 int ret;
570
cfcdbde3 571 mutex_lock(&sta->ampdu_mlme.mtx);
849b7967 572
849b7967
JB
573 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
574
cfcdbde3
JB
575 mutex_unlock(&sta->ampdu_mlme.mtx);
576
849b7967
JB
577 return ret;
578}
b8695a8f 579
/*
 * Request teardown of the TX BA session with @pubsta for @tid.
 *
 * Only flags the session for stopping and queues the aggregation
 * work; the actual teardown happens there. Safe under sta->lock
 * (no sleeping calls here).
 *
 * Returns 0 on success or if a stop is already in progress,
 * -EINVAL for bad arguments, -ENOENT if no session exists.
 */
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
618
/*
 * Driver callback: the driver has completed IEEE80211_AMPDU_TX_STOP
 * for @ra/@tid. Sends the delBA if we initiated the stop, splices
 * any buffered frames back to the normal TX path, and releases the
 * per-TID state via RCU.
 * Takes local->sta_mtx and sta->ampdu_mlme.mtx; may sleep.
 */
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get(sdata, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		goto unlock_sta;
	}

	/* tell the peer only if we (not it) initiated the teardown */
	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(local, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);

	ieee80211_agg_splice_finish(local, tid);

	/* free only after concurrent RCU readers have finished */
	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);

 unlock_sta:
	spin_unlock_bh(&sta->lock);
	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}
b8695a8f 691
c951ad35 692void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
b8695a8f
JB
693 const u8 *ra, u16 tid)
694{
c951ad35
JB
695 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
696 struct ieee80211_local *local = sdata->local;
b8695a8f
JB
697 struct ieee80211_ra_tid *ra_tid;
698 struct sk_buff *skb = dev_alloc_skb(0);
699
700 if (unlikely(!skb)) {
701#ifdef CONFIG_MAC80211_HT_DEBUG
702 if (net_ratelimit())
703 printk(KERN_WARNING "%s: Not enough memory, "
47846c9b 704 "dropping stop BA session", sdata->name);
b8695a8f
JB
705#endif
706 return;
707 }
708 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
709 memcpy(&ra_tid->ra, ra, ETH_ALEN);
710 ra_tid->tid = tid;
711
c1475ca9
JB
712 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
713 skb_queue_tail(&sdata->skb_queue, skb);
714 ieee80211_queue_work(&local->hw, &sdata->work);
b8695a8f
JB
715}
716EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
717
86ab6c5a 718
b8695a8f
JB
/*
 * Handle an incoming ADDBA response frame for @sta: match it against
 * our outstanding request via the dialog token, then either switch
 * the session to operational (on success, once the driver is also
 * ready) or tear it down (on refusal).
 *
 * @len is currently unused here — presumably length-validated by the
 * caller; confirm in the RX action-frame path.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif
		goto out;
	}

	/* a matching response arrived — no need for the timeout anymore */
	del_timer(&tid_tx->addba_resp_timer);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif

	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		/* driver already acknowledged TX_START: go operational */
		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		/* peer accepted — reset the retry counter */
		sta->ampdu_mlme.addba_req_num[tid] = 0;
	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
This page took 0.156425 seconds and 5 git commands to generate.