drivers/net/wireless/mwifiex/wmm.c
/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"


/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512

#define WMM_QUEUED_PACKET_LOWER_LIMIT	180

#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

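/*
 * Module parameter: when set, the driver may coalesce queued frames
 * into A-MSDUs before handing them to firmware (checked in the
 * dequeue path below). Disabled by default.
 */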
static bool enable_tx_amsdu;
module_param(enable_tx_amsdu, bool, 0644);

/* WMM information IE: vendor-specific IE carrying the Microsoft OUI
 * (00:50:f2), OUI type 2 (WMM), subtype 0 (information), version 1.
 */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
				  0x00, 0x50, 0xf2, 0x02,
				  0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID	DSCP_P2	DSCP_P1	DSCP_P0	WMM_AC */
	0x01,	/* 0	1	0	AC_BK */
	0x02,	/* 0	0	0	AC_BK */
	0x00,	/* 0	0	1	AC_BE */
	0x03,	/* 0	1	1	AC_BE */
	0x04,	/* 1	0	0	AC_VI */
	0x05,	/* 1	0	1	AC_VI */
	0x06,	/* 1	1	0	AC_VO */
	0x07	/* 1	1	1	AC_VO */
};

/*
 * This table inverts the tos_to_tid operation: it maps a TID to a
 * sequential priority value that can be compared directly. Use it to
 * compare the priorities of two different TIDs.
 */
static u8 tos_to_tid_inv[] = {
	0x02,  /* from tos_to_tid[2] = 0 */
	0x00,  /* from tos_to_tid[0] = 1 */
	0x01,  /* from tos_to_tid[1] = 2 */
	0x03,
	0x04,
	0x05,
	0x06,
	0x07};
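/*
 * Each WMM AC owns two TIDs (802.11 user priorities). Index by AC
 * (BK, BE, VI, VO) to get its {low, high} TID pair, e.g.
 * ac_to_tid[WMM_AC_VI] = {4, 5}.
 */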
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a receiver address (RA) list node.
 *
 * The function also initializes the node with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32, used as the
 * threshold for the number of packets after which BA setup is initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u32 sec, usec;
	struct timeval ba_tstamp;
	u8 ba_threshold;

	/* ba_packet_threshold is a pseudo-random number in the range
	 * [BA_SETUP_PACKET_OFFSET,
	 *  BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]
	 */

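	/* Fold seconds and microseconds into 16-bit halves and mix them,
	 * giving a cheap per-call variation without a dedicated RNG.
	 */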
	do_gettimeofday(&ba_tstamp);
	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
						      + BA_SETUP_PACKET_OFFSET;

	return ba_threshold;
}

/*
 * This function allocates and adds an RA list node for each TID
 * with the given RA.
 */
void
mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
	node = mwifiex_get_sta_entry(priv, ra);
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		if (!mwifiex_queuing_ra_based(priv)) {
			ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
		} else {
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
		}

		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
			ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
{
	u8 *queue_priority = wmm->queue_priority;
	int i;

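	/*
	 * queue_priority[0] holds the highest-priority AC; its TID pair
	 * fills tos_to_tid[7..6], the next AC fills [5..4], and so on.
	 */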
	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		dev_dbg(priv->adapter->dev,
			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

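	/*
	 * Rank the ACs by average back-off (ascending): a lower back-off
	 * means the AC contends more aggressively, so it gets a higher
	 * queue priority. Ties keep the higher-priority AC first.
	 */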
	/* Bubble sort */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(&priv->wmm);
}

/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * If the given AC is disabled, the highest AC that is both enabled
 * and free of admission control is returned instead.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Set up a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}

/*
 * This function sets up the WMM AC downgrade mapping.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities: "
		"BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			dev_dbg(priv->adapter->dev,
				"info: WMM: AC PRIO %d maps to %d\n",
				ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
						    WMM_AC_BK,
						    WMM_AC_BK,
						    WMM_AC_BE,
						    WMM_AC_VI,
						    WMM_AC_VI,
						    WMM_AC_VO,
						    WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that
 * the corresponding AC is disabled (admission control mandatory, the
 * ACM bit). The mapping of TID to AC is taken care of internally.
 */
static u8
mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index to the tid array; picking from the array is
	 * taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
		}
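		/*
		 * TIDs 6 and 7 carry voice-class traffic; BA streams are
		 * never initiated on them, presumably to keep their
		 * latency low.
		 */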
		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callbacks are called with status failure,
 * after the packets are dequeued, to ensure proper cleanup. The RA
 * list node itself is freed separately.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
		mwifiex_write_data_complete(adapter, skb, 0, -1);
}

/*
 * This function deletes all packets in an RA list.
 *
 * The packets in each node of the RA list are dropped individually;
 * the nodes themselves are freed later, when the RA list is deleted.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv,
					       &priv->wmm.tid_tbl_ptr[i].ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function deletes all RA list nodes for all TIDs.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		dev_dbg(priv->adapter->dev,
			"info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
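	/* Restore the default TID mapping; ac_to_tid flattened row by row
	 * ({1, 2, 0, 3, 4, 5, 6, 7}) equals the default tos_to_tid table.
	 */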
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In the disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	unsigned long flags;

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In infrastructure mode the RA list was already created during
	 * association, so get_queue_raptr need not be called; there is
	 * only one RA pointer per TID in that case.
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
			ra_list = list_first_entry(
				&priv->wmm.tid_tbl_ptr[tid_down].ra_list,
				struct mwifiex_ra_list_tbl, list);
		else
			ra_list = NULL;
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
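		/* Group-addressed and management frames all share the
		 * broadcast RA list.
		 */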
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			memset(ra, 0xff, ETH_ALEN);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

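	/* Track the highest priority that has pending traffic, so the
	 * dequeue path knows where to start scanning.
	 */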
	if (atomic_read(&priv->wmm.highest_queued_prio) <
						tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   tos_to_tid_inv[tid_down]);

	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		resp_len);

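	/* Walk the TLV buffer; an unrecognized TLV type ends parsing. */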
	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" QSTATUS TLV: %d, %d, %d\n",
				tlv_wmm_qstatus->queue_index,
				tlv_wmm_qstatus->flow_required,
				tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE two bytes into the
			 * Marvell IE and set up the IEEE IE type and length
			 * byte fields
			 */
			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" WMM Parameter Set Count: %d\n",
				wmm_param_ie->qos_info_bitmap &
				IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	dev_dbg(priv->adapter->dev,
		"info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/driver interface, the current
 * time is set in the packet structure. This function computes the
 * difference between that timestamp and the present time, limited by
 * pre-compiled bounds in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u8 ret_val;
	struct timeval out_tstamp, in_tstamp;
	u32 queue_delay;

	do_gettimeofday(&out_tstamp);
	in_tstamp = ktime_to_timeval(skb->tstamp);

	queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
	queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;

	/*
	 * Queue delay is passed as a uint8 in units of 2 ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2 ms, max is 510 ms.
	 *
	 * Pass the max value if queue_delay is beyond the uint8 range.
	 */
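	/* Example: a packet that waited 100 ms is reported to firmware as 50. */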
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
		" %d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}

/*
 * This function retrieves the pointer to the highest-priority RA list
 * entry that has traffic queued.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_bss, flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
				  flags_bss);

		/* iterate over BSSes of equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}

		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
				       flags_bss);
	}

	return NULL;

found:
	/* holds bss_prio_lock / ra_list_spinlock */
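	/* Remember the highest priority that actually had traffic, so the
	 * next scan can start there and skip empty higher-priority TIDs.
	 */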
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
			       flags_bss);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This function rotates the RA and BSS lists so packets are picked in
 * round-robin fashion.
 *
 * After a packet is successfully transmitted, rotate the RA list so that
 * the RA next to the one just served comes first in the list. The same
 * applies to BSS nodes of equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * the current bss node. Imagine the list staying fixed while head
	 * is moved.
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

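	/* Aggregation pays off only if at least MIN_NUM_AMSDU queued
	 * frames fit inside a single A-MSDU buffer.
	 */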
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Transfer failed: requeue the packet at the queue tail and
		   flag it as already processed so it is sent as-is later */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * has already been processed (i.e. requeued after a failed send).
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	dev_dbg(adapter->dev, "data: tid=%d\n", tid);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}
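	/* Send a plain single frame when 11n aggregation does not apply:
	 * 11n is off for this RA list, a BA stream is already set up
	 * (aggregation is then presumably handled by firmware), a WPS
	 * session is active, or WPA/WPA2 group keys are not yet in place.
	 */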
	if (!ptr->is_11n_enabled ||
	    mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
	    priv->wps.session_enable ||
	    ((priv->sec_info.wpa_enabled ||
	      priv->sec_info.wpa2_enabled) &&
	     !priv->wpa_is_gtk_set)) {
		mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_single_packet() */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}
	return 0;
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		/* Check if busy */
		if (adapter->data_sent || adapter->tx_lock_flag)
			break;

		if (mwifiex_dequeue_tx_packet(adapter))
			break;
	} while (!mwifiex_wmm_lists_empty(adapter));
}