mwifiex: allocate space for one more mwifiex_private structure
drivers/net/wireless/mwifiex/wmm.c
1 /*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20 #include "decl.h"
21 #include "ioctl.h"
22 #include "util.h"
23 #include "fw.h"
24 #include "main.h"
25 #include "wmm.h"
26 #include "11n.h"
27
28
29 /* Maximum value FW can accept for driver delay in packet transmission */
30 #define DRV_PKT_DELAY_TO_FW_MAX 512
31
32
33 #define WMM_QUEUED_PACKET_LOWER_LIMIT 180
34
35 #define WMM_QUEUED_PACKET_UPPER_LIMIT 200
36
37 /* Offset for TOS field in the IP header */
38 #define IPTOS_OFFSET 5
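/* Given the value 5, this is presumably used as a right-shift count to
 * extract the 3-bit precedence field from the IPv4 TOS byte (the field
 * used as the 802.1d user priority); the TOS byte itself sits at byte
 * offset 1 of the IPv4 header, so the "offset" here is a bit position,
 * not a byte offset. This reading is an assumption, not something
 * stated by the code in this file.
 */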
39
40 /* WMM information IE */
41 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
42 0x00, 0x50, 0xf2, 0x02,
43 0x00, 0x01, 0x00
44 };
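/* Byte by byte, the IE above encodes: vendor-specific element ID,
 * length 7, the Microsoft OUI 00:50:F2, OUI type 2 (WMM/WME), OUI
 * subtype 0 (WMM Information Element), version 1, and a QoS info field
 * of 0. The QoS info byte is overwritten with priv->wmm_qosinfo when
 * U-APSD is used (see mwifiex_wmm_process_association_req() below).
 */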
45
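/* The 2-bit ACI value carried in the WMM Parameter IE is defined by the
 * WMM specification as 0 = AC_BE, 1 = AC_BK, 2 = AC_VI, 3 = AC_VO; the
 * table below maps that on-air encoding to the driver's
 * mwifiex_wmm_ac_e queue index.
 */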
46 static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
47 WMM_AC_BK,
48 WMM_AC_VI,
49 WMM_AC_VO
50 };
51
52 static u8 tos_to_tid[] = {
53 /* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
54 0x01, /* 0 1 0 AC_BK */
55 0x02, /* 0 0 0 AC_BK */
56 0x00, /* 0 0 1 AC_BE */
57 0x03, /* 0 1 1 AC_BE */
58 0x04, /* 1 0 0 AC_VI */
59 0x05, /* 1 0 1 AC_VI */
60 0x06, /* 1 1 0 AC_VO */
61 0x07 /* 1 1 1 AC_VO */
62 };
63
64 /*
65  * This table inverts the tos_to_tid operation to get a priority
66  * which is in sequential order and can therefore be compared.
67 * Use this to compare the priority of two different TIDs.
68 */
69 static u8 tos_to_tid_inv[] = {
70 0x02, /* from tos_to_tid[2] = 0 */
71 0x00, /* from tos_to_tid[0] = 1 */
72 0x01, /* from tos_to_tid[1] = 2 */
73 0x03,
74 0x04,
75 0x05,
76 0x06,
77 0x07};
78
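/* Each WMM AC owns a pair of TIDs. Indexed by mwifiex_wmm_ac_e
 * (assuming the usual WMM_AC_BK..WMM_AC_VO ordering used elsewhere in
 * this file), the table below reads: AC_BK -> TIDs {1, 2},
 * AC_BE -> {0, 3}, AC_VI -> {4, 5}, AC_VO -> {6, 7}, matching the
 * standard 802.1d user-priority to access-category mapping.
 */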
79 static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
80
81 /*
82 * This function debug prints the priority parameters for a WMM AC.
83 */
84 static void
85 mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
86 {
87 const char *ac_str[] = { "BK", "BE", "VI", "VO" };
88
89 pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
90 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
91 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
92 & MWIFIEX_ACI) >> 5]],
93 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
94 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
95 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
96 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
97 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
98 le16_to_cpu(ac_param->tx_op_limit));
99 }
100
101 /*
102  * This function allocates a route address (RA) list node.
103  *
104  * The function also initializes the node with the provided RA.
105 */
106 static struct mwifiex_ra_list_tbl *
107 mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
108 {
109 struct mwifiex_ra_list_tbl *ra_list;
110
111 ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
112
113 if (!ra_list) {
114 dev_err(adapter->dev, "%s: failed to alloc ra_list\n",
115 __func__);
116 return NULL;
117 }
118 INIT_LIST_HEAD(&ra_list->list);
119 skb_queue_head_init(&ra_list->skb_head);
120
121 memcpy(ra_list->ra, ra, ETH_ALEN);
122
123 ra_list->total_pkts_size = 0;
124
125 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
126
127 return ra_list;
128 }
129
130 /*
131  * This function allocates and adds an RA list for all TIDs
132 * with the given RA.
133 */
134 void
135 mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
136 {
137 int i;
138 struct mwifiex_ra_list_tbl *ra_list;
139 struct mwifiex_adapter *adapter = priv->adapter;
140
141 for (i = 0; i < MAX_NUM_TID; ++i) {
142 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
143 dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
144
145 if (!ra_list)
146 break;
147
148 if (!mwifiex_queuing_ra_based(priv))
149 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
150 else
151 ra_list->is_11n_enabled = false;
152
153 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
154 ra_list, ra_list->is_11n_enabled);
155
156 list_add_tail(&ra_list->list,
157 &priv->wmm.tid_tbl_ptr[i].ra_list);
158
159 if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
160 priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
161 }
162 }
163
164 /*
165 * This function sets the WMM queue priorities to their default values.
166 */
167 static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
168 {
169 /* Default queue priorities: VO->VI->BE->BK */
170 priv->wmm.queue_priority[0] = WMM_AC_VO;
171 priv->wmm.queue_priority[1] = WMM_AC_VI;
172 priv->wmm.queue_priority[2] = WMM_AC_BE;
173 priv->wmm.queue_priority[3] = WMM_AC_BK;
174 }
175
176 /*
177  * This function maps ACs to TIDs.
178 */
179 static void
180 mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
181 {
182 u8 *queue_priority = wmm->queue_priority;
183 int i;
184
185 for (i = 0; i < 4; ++i) {
186 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
187 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
188 }
189
190 for (i = 0; i < MAX_NUM_TID; ++i)
191 tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
192
193 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
194 }
195
196 /*
197 * This function initializes WMM priority queues.
198 */
199 void
200 mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
201 struct ieee_types_wmm_parameter *wmm_ie)
202 {
203 u16 cw_min, avg_back_off, tmp[4];
204 u32 i, j, num_ac;
205 u8 ac_idx;
206
207 if (!wmm_ie || !priv->wmm_enabled) {
208 /* WMM is not enabled, just set the defaults and return */
209 mwifiex_wmm_default_queue_priorities(priv);
210 return;
211 }
212
213 dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
214 "qos_info Parameter Set Count=%d, Reserved=%#x\n",
215 wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
216 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
217 wmm_ie->reserved);
218
219 for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
220 u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
221 u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
222 cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
223 avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
224
225 ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
226 priv->wmm.queue_priority[ac_idx] = ac_idx;
227 tmp[ac_idx] = avg_back_off;
228
229 dev_dbg(priv->adapter->dev,
230 "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
231 (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
232 cw_min, avg_back_off);
233 mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
234 }
235
236 /* Bubble sort */
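/* Worked example (not taken from the driver itself, just the
 * arithmetic above applied to the default 802.11 EDCA parameters):
 * AC_BK has AIFSN 7 / ECWmin 4 -> avg_back_off = 7 + 7 = 14,
 * AC_BE 3/4 -> 10, AC_VI 2/3 -> 5, AC_VO 2/2 -> 3. Sorting by
 * ascending back-off therefore leaves queue_priority as
 * {VO, VI, BE, BK}, the same order as the defaults set in
 * mwifiex_wmm_default_queue_priorities().
 */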
237 for (i = 0; i < num_ac; i++) {
238 for (j = 1; j < num_ac - i; j++) {
239 if (tmp[j - 1] > tmp[j]) {
240 swap(tmp[j - 1], tmp[j]);
241 swap(priv->wmm.queue_priority[j - 1],
242 priv->wmm.queue_priority[j]);
243 } else if (tmp[j - 1] == tmp[j]) {
244 if (priv->wmm.queue_priority[j - 1]
245 < priv->wmm.queue_priority[j])
246 swap(priv->wmm.queue_priority[j - 1],
247 priv->wmm.queue_priority[j]);
248 }
249 }
250 }
251
252 mwifiex_wmm_queue_priorities_tid(&priv->wmm);
253 }
254
255 /*
256 * This function evaluates whether or not an AC is to be downgraded.
257 *
258  * If the given AC is disabled, the highest AC that is enabled and
259  * does not require admission control is returned.
260 */
261 static enum mwifiex_wmm_ac_e
262 mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
263 enum mwifiex_wmm_ac_e eval_ac)
264 {
265 int down_ac;
266 enum mwifiex_wmm_ac_e ret_ac;
267 struct mwifiex_wmm_ac_status *ac_status;
268
269 ac_status = &priv->wmm.ac_status[eval_ac];
270
271 if (!ac_status->disabled)
272  /* Okay to use this AC, it's enabled */
273 return eval_ac;
274
275 /* Setup a default return value of the lowest priority */
276 ret_ac = WMM_AC_BK;
277
278 /*
279 * Find the highest AC that is enabled and does not require
280  * admission control. The spec disallows downgrading to an AC
281  * which is enabled only because admission control has completed.
282 * Unadmitted traffic is not to be sent on an AC with admitted
283 * traffic.
284 */
285 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
286 ac_status = &priv->wmm.ac_status[down_ac];
287
288 if (!ac_status->disabled && !ac_status->flow_required)
289 /* AC is enabled and does not require admission
290 control */
291 ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
292 }
293
294 return ret_ac;
295 }
296
297 /*
298  * This function sets up the WMM AC downgrade mapping.
299 */
300 void
301 mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
302 {
303 int ac_val;
304
305 dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
306 "BK(0), BE(1), VI(2), VO(3)\n");
307
308 if (!priv->wmm_enabled) {
309 /* WMM is not enabled, default priorities */
310 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
311 priv->wmm.ac_down_graded_vals[ac_val] =
312 (enum mwifiex_wmm_ac_e) ac_val;
313 } else {
314 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
315 priv->wmm.ac_down_graded_vals[ac_val]
316 = mwifiex_wmm_eval_downgrade_ac(priv,
317 (enum mwifiex_wmm_ac_e) ac_val);
318 dev_dbg(priv->adapter->dev,
319 "info: WMM: AC PRIO %d maps to %d\n",
320 ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
321 }
322 }
323 }
324
325 /*
326  * This function converts the IP TOS field to a WMM AC
327  * queue assignment.
328 */
329 static enum mwifiex_wmm_ac_e
330 mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
331 {
332 /* Map of TOS UP values to WMM AC */
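/* This is the standard 802.1d user-priority mapping: UP 0 and 3 ->
 * best effort, UP 1 and 2 -> background, UP 4 and 5 -> video,
 * UP 6 and 7 -> voice.
 */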
333 const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
334 WMM_AC_BK,
335 WMM_AC_BK,
336 WMM_AC_BE,
337 WMM_AC_VI,
338 WMM_AC_VI,
339 WMM_AC_VO,
340 WMM_AC_VO
341 };
342
343 if (tos >= ARRAY_SIZE(tos_to_ac))
344 return WMM_AC_BE;
345
346 return tos_to_ac[tos];
347 }
348
349 /*
350 * This function evaluates a given TID and downgrades it to a lower
351 * TID if the WMM Parameter IE received from the AP indicates that the
352  * corresponding AC is disabled (due to call admission control, i.e.
353  * the ACM bit). Mapping of TID to AC is taken care of internally.
354 */
355 static u8
356 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
357 {
358 enum mwifiex_wmm_ac_e ac, ac_down;
359 u8 new_tid;
360
361 ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
362 ac_down = priv->wmm.ac_down_graded_vals[ac];
363
364  /* Return the index into the TID table; picking packets from that
365  * table is taken care of by the dequeuing function
366  */
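/* Example (derived from the tables above, not a path traced in
 * firmware): with the default mapping a TID 6 packet belongs to AC_VO;
 * if both VO and VI are ACM-protected and unadmitted, ac_down is
 * WMM_AC_BE and the packet is remapped to
 * ac_to_tid[WMM_AC_BE][6 % 2] = TID 0.
 */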
367 new_tid = ac_to_tid[ac_down][tid % 2];
368
369 return new_tid;
370 }
371
372 /*
373 * This function initializes the WMM state information and the
374 * WMM data path queues.
375 */
376 void
377 mwifiex_wmm_init(struct mwifiex_adapter *adapter)
378 {
379 int i, j;
380 struct mwifiex_private *priv;
381
382 for (j = 0; j < adapter->priv_num; ++j) {
383 priv = adapter->priv[j];
384 if (!priv)
385 continue;
386
387 for (i = 0; i < MAX_NUM_TID; ++i) {
388 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
389 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
390 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
391 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
392 }
393
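/* With the default tos_to_tid mapping, TIDs 6 and 7 carry AC_VO
 * traffic; AMSDU and AMPDU aggregation are disabled on them by default.
 */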
394 priv->aggr_prio_tbl[6].amsdu
395 = priv->aggr_prio_tbl[6].ampdu_ap
396 = priv->aggr_prio_tbl[6].ampdu_user
397 = BA_STREAM_NOT_ALLOWED;
398
399 priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
400 = priv->aggr_prio_tbl[7].ampdu_user
401 = BA_STREAM_NOT_ALLOWED;
402
403 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
404 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
405 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
406
407 atomic_set(&priv->wmm.tx_pkts_queued, 0);
408 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
409 }
410 }
411
412 /*
413 * This function checks if WMM Tx queue is empty.
414 */
415 int
416 mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
417 {
418 int i;
419 struct mwifiex_private *priv;
420
421 for (i = 0; i < adapter->priv_num; ++i) {
422 priv = adapter->priv[i];
423 if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
424 return false;
425 }
426
427 return true;
428 }
429
430 /*
431 * This function deletes all packets in an RA list node.
432 *
433  * The packet send completion callbacks are called with failure
434  * status after the packets are dequeued, to ensure proper cleanup;
435  * the RA list node itself is freed later, when the RA lists are deleted.
436 */
437 static void
438 mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
439 struct mwifiex_ra_list_tbl *ra_list)
440 {
441 struct mwifiex_adapter *adapter = priv->adapter;
442 struct sk_buff *skb, *tmp;
443
444 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
445 mwifiex_write_data_complete(adapter, skb, -1);
446 }
447
448 /*
449 * This function deletes all packets in an RA list.
450 *
451  * The packets queued in each node of the RA list are freed
452  * individually; the RA list nodes themselves are freed separately.
453 */
454 static void
455 mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
456 struct list_head *ra_list_head)
457 {
458 struct mwifiex_ra_list_tbl *ra_list;
459
460 list_for_each_entry(ra_list, ra_list_head, list)
461 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
462 }
463
464 /*
465 * This function deletes all packets in all RA lists.
466 */
467 static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
468 {
469 int i;
470
471 for (i = 0; i < MAX_NUM_TID; i++)
472 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
473 ra_list);
474
475 atomic_set(&priv->wmm.tx_pkts_queued, 0);
476 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
477 }
478
479 /*
480 * This function deletes all route addresses from all RA lists.
481 */
482 static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
483 {
484 struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
485 int i;
486
487 for (i = 0; i < MAX_NUM_TID; ++i) {
488 dev_dbg(priv->adapter->dev,
489 "info: ra_list: freeing buf for tid %d\n", i);
490 list_for_each_entry_safe(ra_list, tmp_node,
491 &priv->wmm.tid_tbl_ptr[i].ra_list,
492 list) {
493 list_del(&ra_list->list);
494 kfree(ra_list);
495 }
496
497 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
498
499 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
500 }
501 }
502
503 /*
504 * This function cleans up the Tx and Rx queues.
505 *
506 * Cleanup includes -
507 * - All packets in RA lists
508 * - All entries in Rx reorder table
509 * - All entries in Tx BA stream table
510 * - MPA buffer (if required)
511 * - All RA lists
512 */
513 void
514 mwifiex_clean_txrx(struct mwifiex_private *priv)
515 {
516 unsigned long flags;
517
518 mwifiex_11n_cleanup_reorder_tbl(priv);
519 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
520
521 mwifiex_wmm_cleanup_queues(priv);
522 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
523
524 if (priv->adapter->if_ops.cleanup_mpa_buf)
525 priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
526
527 mwifiex_wmm_delete_all_ralist(priv);
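/* Flattened row by row, ac_to_tid is {1, 2, 0, 3, 4, 5, 6, 7}, which
 * is exactly the default tos_to_tid table, so the memcpy below restores
 * the default TID mapping that may have been reshuffled by
 * mwifiex_wmm_queue_priorities_tid().
 */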
528 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
529
530 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
531 }
532
533 /*
534  * This function retrieves a particular RA list node matching the
535 * given TID and RA address.
536 */
537 static struct mwifiex_ra_list_tbl *
538 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
539 u8 *ra_addr)
540 {
541 struct mwifiex_ra_list_tbl *ra_list;
542
543 list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
544 list) {
545 if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
546 return ra_list;
547 }
548
549 return NULL;
550 }
551
552 /*
553 * This function retrieves an RA list node for a given TID and
554 * RA address pair.
555 *
556 * If no such node is found, a new node is added first and then
557 * retrieved.
558 */
559 static struct mwifiex_ra_list_tbl *
560 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
561 {
562 struct mwifiex_ra_list_tbl *ra_list;
563
564 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
565 if (ra_list)
566 return ra_list;
567 mwifiex_ralist_add(priv, ra_addr);
568
569 return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
570 }
571
572 /*
573 * This function checks if a particular RA list node exists in a given TID
574 * table index.
575 */
576 int
577 mwifiex_is_ralist_valid(struct mwifiex_private *priv,
578 struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
579 {
580 struct mwifiex_ra_list_tbl *rlist;
581
582 list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
583 list) {
584 if (rlist == ra_list)
585 return true;
586 }
587
588 return false;
589 }
590
591 /*
592 * This function adds a packet to WMM queue.
593 *
594 * In disconnected state the packet is immediately dropped and the
595 * packet send completion callback is called with status failure.
596 *
597 * Otherwise, the correct RA list node is located and the packet
598 * is queued at the list tail.
599 */
600 void
601 mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
602 struct sk_buff *skb)
603 {
604 struct mwifiex_adapter *adapter = priv->adapter;
605 u32 tid;
606 struct mwifiex_ra_list_tbl *ra_list;
607 u8 ra[ETH_ALEN], tid_down;
608 unsigned long flags;
609
610 if (!priv->media_connected) {
611 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
612 mwifiex_write_data_complete(adapter, skb, -1);
613 return;
614 }
615
616 tid = skb->priority;
617
618 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
619
620 tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
621
622  /* In infrastructure mode the list has already been created during
623  association, so there is no need to call get_queue_raptr; there is
624  only one RA pointer per TID in that case */
625 if (!mwifiex_queuing_ra_based(priv)) {
626 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
627 ra_list = list_first_entry(
628 &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
629 struct mwifiex_ra_list_tbl, list);
630 else
631 ra_list = NULL;
632 } else {
633 memcpy(ra, skb->data, ETH_ALEN);
634 if (ra[0] & 0x01)
635 memset(ra, 0xff, ETH_ALEN);
636 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
637 }
638
639 if (!ra_list) {
640 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
641 mwifiex_write_data_complete(adapter, skb, -1);
642 return;
643 }
644
645 skb_queue_tail(&ra_list->skb_head, skb);
646
647 ra_list->total_pkts_size += skb->len;
648
649 atomic_inc(&priv->wmm.tx_pkts_queued);
650
651 if (atomic_read(&priv->wmm.highest_queued_prio) <
652 tos_to_tid_inv[tid_down])
653 atomic_set(&priv->wmm.highest_queued_prio,
654 tos_to_tid_inv[tid_down]);
655
656 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
657 }
658
659 /*
660 * This function processes the get WMM status command response from firmware.
661 *
662 * The response may contain multiple TLVs -
663 * - AC Queue status TLVs
664 * - Current WMM Parameter IE TLV
665 * - Admission Control action frame TLVs
666 *
667 * This function parses the TLVs and then calls further specific functions
668  * to process any changes in the queue priorities or state.
669 */
670 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
671 const struct host_cmd_ds_command *resp)
672 {
673 u8 *curr = (u8 *) &resp->params.get_wmm_status;
674 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
675 int valid = true;
676
677 struct mwifiex_ie_types_data *tlv_hdr;
678 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
679 struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
680 struct mwifiex_wmm_ac_status *ac_status;
681
682 dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
683 resp_len);
684
685 while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
686 tlv_hdr = (struct mwifiex_ie_types_data *) curr;
687 tlv_len = le16_to_cpu(tlv_hdr->header.len);
688
689 switch (le16_to_cpu(tlv_hdr->header.type)) {
690 case TLV_TYPE_WMMQSTATUS:
691 tlv_wmm_qstatus =
692 (struct mwifiex_ie_types_wmm_queue_status *)
693 tlv_hdr;
694 dev_dbg(priv->adapter->dev,
695 "info: CMD_RESP: WMM_GET_STATUS:"
696 " QSTATUS TLV: %d, %d, %d\n",
697 tlv_wmm_qstatus->queue_index,
698 tlv_wmm_qstatus->flow_required,
699 tlv_wmm_qstatus->disabled);
700
701 ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
702 queue_index];
703 ac_status->disabled = tlv_wmm_qstatus->disabled;
704 ac_status->flow_required =
705 tlv_wmm_qstatus->flow_required;
706 ac_status->flow_created = tlv_wmm_qstatus->flow_created;
707 break;
708
709 case WLAN_EID_VENDOR_SPECIFIC:
710 /*
711 * Point the regular IEEE IE 2 bytes into the Marvell IE
712  * and set up the IEEE IE type and length byte fields
713 */
714
715 wmm_param_ie =
716 (struct ieee_types_wmm_parameter *) (curr +
717 2);
718 wmm_param_ie->vend_hdr.len = (u8) tlv_len;
719 wmm_param_ie->vend_hdr.element_id =
720 WLAN_EID_VENDOR_SPECIFIC;
721
722 dev_dbg(priv->adapter->dev,
723 "info: CMD_RESP: WMM_GET_STATUS:"
724 " WMM Parameter Set Count: %d\n",
725 wmm_param_ie->qos_info_bitmap &
726 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
727
728 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
729 wmm_ie, wmm_param_ie,
730 wmm_param_ie->vend_hdr.len + 2);
731
732 break;
733
734 default:
735 valid = false;
736 break;
737 }
738
739 curr += (tlv_len + sizeof(tlv_hdr->header));
740 resp_len -= (tlv_len + sizeof(tlv_hdr->header));
741 }
742
743 mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
744 mwifiex_wmm_setup_ac_downgrade(priv);
745
746 return 0;
747 }
748
749 /*
750 * Callback handler from the command module to allow insertion of a WMM TLV.
751 *
752 * If the BSS we are associating to supports WMM, this function adds the
753 * required WMM Information IE to the association request command buffer in
754 * the form of a Marvell extended IEEE IE.
755 */
756 u32
757 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
758 u8 **assoc_buf,
759 struct ieee_types_wmm_parameter *wmm_ie,
760 struct ieee80211_ht_cap *ht_cap)
761 {
762 struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
763 u32 ret_len = 0;
764
765 /* Null checks */
766 if (!assoc_buf)
767 return 0;
768 if (!(*assoc_buf))
769 return 0;
770
771 if (!wmm_ie)
772 return 0;
773
774 dev_dbg(priv->adapter->dev,
775 "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
776 wmm_ie->vend_hdr.element_id);
777
778 if ((priv->wmm_required ||
779 (ht_cap && (priv->adapter->config_bands & BAND_GN ||
780 priv->adapter->config_bands & BAND_AN))) &&
781 wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
782 wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
783 wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
784 wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
785 memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
786 le16_to_cpu(wmm_tlv->header.len));
787 if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
788 memcpy((u8 *) (wmm_tlv->wmm_ie
789 + le16_to_cpu(wmm_tlv->header.len)
790 - sizeof(priv->wmm_qosinfo)),
791 &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));
792
793 ret_len = sizeof(wmm_tlv->header)
794 + le16_to_cpu(wmm_tlv->header.len);
795
796 *assoc_buf += ret_len;
797 }
798
799 return ret_len;
800 }
801
802 /*
803 * This function computes the time delay in the driver queues for a
804 * given packet.
805 *
806 * When the packet is received at the OS/Driver interface, the current
807 * time is set in the packet structure. The difference between the present
808  * time and the receive time is computed in this function and capped
809  * based on pre-compiled limits in the driver.
810 */
811 u8
812 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
813 const struct sk_buff *skb)
814 {
815 u8 ret_val;
816 struct timeval out_tstamp, in_tstamp;
817 u32 queue_delay;
818
819 do_gettimeofday(&out_tstamp);
820 in_tstamp = ktime_to_timeval(skb->tstamp);
821
822 queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
823 queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;
824
825 /*
826 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
827 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
828 *
829 * Pass max value if queue_delay is beyond the uint8 range
830 */
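/* For instance, a 100 ms driver delay is reported as 50, assuming
 * priv->wmm.drv_pkt_delay_max is at least 100.
 */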
831 ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
832
833 dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
834 " %d ms sent to FW\n", queue_delay, ret_val);
835
836 return ret_val;
837 }
838
839 /*
840 * This function retrieves the highest priority RA list table pointer.
841 */
842 static struct mwifiex_ra_list_tbl *
843 mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
844 struct mwifiex_private **priv, int *tid)
845 {
846 struct mwifiex_private *priv_tmp;
847 struct mwifiex_ra_list_tbl *ptr, *head;
848 struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
849 struct mwifiex_tid_tbl *tid_ptr;
850 atomic_t *hqp;
851 int is_list_empty;
852 unsigned long flags;
853 int i, j;
854
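/* Search order, as implemented by the nested loops below: BSS priority
 * tables from index priv_num - 1 down to 0; within a table, BSS nodes
 * round-robin starting at bss_prio_cur (or the first node); within a
 * priv, TIDs from highest_queued_prio down to LOW_PRIO_TID; within a
 * TID, RA lists round-robin starting after ra_list_curr. The first RA
 * list found with a non-empty skb queue is returned.
 */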
855 for (j = adapter->priv_num - 1; j >= 0; --j) {
856 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
857 flags);
858 is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
859 .bss_prio_head);
860 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
861 flags);
862 if (is_list_empty)
863 continue;
864
865 if (adapter->bss_prio_tbl[j].bss_prio_cur ==
866 (struct mwifiex_bss_prio_node *)
867 &adapter->bss_prio_tbl[j].bss_prio_head) {
868 bssprio_node =
869 list_first_entry(&adapter->bss_prio_tbl[j]
870 .bss_prio_head,
871 struct mwifiex_bss_prio_node,
872 list);
873 bssprio_head = bssprio_node;
874 } else {
875 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
876 bssprio_head = bssprio_node;
877 }
878
879 do {
880 priv_tmp = bssprio_node->priv;
881 hqp = &priv_tmp->wmm.highest_queued_prio;
882
883 for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
884
885 tid_ptr = &(priv_tmp)->wmm.
886 tid_tbl_ptr[tos_to_tid[i]];
887
888 /* For non-STA ra_list_curr may be NULL */
889 if (!tid_ptr->ra_list_curr)
890 continue;
891
892 spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
893 flags);
894 is_list_empty =
895 list_empty(&adapter->bss_prio_tbl[j]
896 .bss_prio_head);
897 spin_unlock_irqrestore(&tid_ptr->tid_tbl_lock,
898 flags);
899 if (is_list_empty)
900 continue;
901
902 /*
903  * Always choose the RA after the one we
904  * transmitted to last time; this way we pick
905  * the RAs in round-robin fashion.
906 */
907 ptr = list_first_entry(
908 &tid_ptr->ra_list_curr->list,
909 struct mwifiex_ra_list_tbl,
910 list);
911
912 head = ptr;
913 if (ptr == (struct mwifiex_ra_list_tbl *)
914 &tid_ptr->ra_list) {
915 /* Get next ra */
916 ptr = list_first_entry(&ptr->list,
917 struct mwifiex_ra_list_tbl, list);
918 head = ptr;
919 }
920
921 do {
922 is_list_empty =
923 skb_queue_empty(&ptr->skb_head);
924
925 if (!is_list_empty)
926 goto found;
927
928 /* Get next ra */
929 ptr = list_first_entry(&ptr->list,
930 struct mwifiex_ra_list_tbl,
931 list);
932 if (ptr ==
933 (struct mwifiex_ra_list_tbl *)
934 &tid_ptr->ra_list)
935 ptr = list_first_entry(
936 &ptr->list,
937 struct mwifiex_ra_list_tbl,
938 list);
939 } while (ptr != head);
940 }
941
942 /* No packet at any TID for this priv. Mark as such
943 * to skip checking TIDs for this priv (until pkt is
944 * added).
945 */
946 atomic_set(hqp, NO_PKT_PRIO_TID);
947
948 /* Get next bss priority node */
949 bssprio_node = list_first_entry(&bssprio_node->list,
950 struct mwifiex_bss_prio_node,
951 list);
952
953 if (bssprio_node ==
954 (struct mwifiex_bss_prio_node *)
955 &adapter->bss_prio_tbl[j].bss_prio_head)
956 /* Get next bss priority node */
957 bssprio_node = list_first_entry(
958 &bssprio_node->list,
959 struct mwifiex_bss_prio_node,
960 list);
961 } while (bssprio_node != bssprio_head);
962 }
963 return NULL;
964
965 found:
966 spin_lock_irqsave(&priv_tmp->wmm.ra_list_spinlock, flags);
967 if (atomic_read(hqp) > i)
968 atomic_set(hqp, i);
969 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags);
970
971 *priv = priv_tmp;
972 *tid = tos_to_tid[i];
973
974 return ptr;
975 }
976
977 /*
978 * This function checks if 11n aggregation is possible.
979 */
980 static int
981 mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
982 struct mwifiex_ra_list_tbl *ptr,
983 int max_buf_size)
984 {
985 int count = 0, total_size = 0;
986 struct sk_buff *skb, *tmp;
987
988 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
989 total_size += skb->len;
990 if (total_size >= max_buf_size)
991 break;
992 if (++count >= MIN_NUM_AMSDU)
993 return true;
994 }
995
996 return false;
997 }
998
999 /*
1000 * This function sends a single packet to firmware for transmission.
1001 */
1002 static void
1003 mwifiex_send_single_packet(struct mwifiex_private *priv,
1004 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1005 unsigned long ra_list_flags)
1006 __releases(&priv->wmm.ra_list_spinlock)
1007 {
1008 struct sk_buff *skb, *skb_next;
1009 struct mwifiex_tx_param tx_param;
1010 struct mwifiex_adapter *adapter = priv->adapter;
1011 struct mwifiex_txinfo *tx_info;
1012
1013 if (skb_queue_empty(&ptr->skb_head)) {
1014 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1015 ra_list_flags);
1016 dev_dbg(adapter->dev, "data: nothing to send\n");
1017 return;
1018 }
1019
1020 skb = skb_dequeue(&ptr->skb_head);
1021
1022 tx_info = MWIFIEX_SKB_TXCB(skb);
1023 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
1024
1025 ptr->total_pkts_size -= skb->len;
1026
1027 if (!skb_queue_empty(&ptr->skb_head))
1028 skb_next = skb_peek(&ptr->skb_head);
1029 else
1030 skb_next = NULL;
1031
1032 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1033
1034 tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1035 sizeof(struct txpd) : 0);
1036
1037 if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1038  /* Queue the packet back */
1039 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1040
1041 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1042 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1043 ra_list_flags);
1044 mwifiex_write_data_complete(adapter, skb, -1);
1045 return;
1046 }
1047
1048 skb_queue_tail(&ptr->skb_head, skb);
1049
1050 ptr->total_pkts_size += skb->len;
1051 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1052 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1053 ra_list_flags);
1054 } else {
1055 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1056 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1057 priv->wmm.packets_out[ptr_index]++;
1058 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1059 }
1060 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1061 list_first_entry(
1062 &adapter->bss_prio_tbl[priv->bss_priority]
1063 .bss_prio_cur->list,
1064 struct mwifiex_bss_prio_node,
1065 list);
1066 atomic_dec(&priv->wmm.tx_pkts_queued);
1067 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1068 ra_list_flags);
1069 }
1070 }
1071
1072 /*
1073 * This function checks if the first packet in the given RA list
1074  * has already been processed or not.
1075 */
1076 static int
1077 mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1078 struct mwifiex_ra_list_tbl *ptr)
1079 {
1080 struct sk_buff *skb;
1081 struct mwifiex_txinfo *tx_info;
1082
1083 if (skb_queue_empty(&ptr->skb_head))
1084 return false;
1085
1086 skb = skb_peek(&ptr->skb_head);
1087
1088 tx_info = MWIFIEX_SKB_TXCB(skb);
1089 if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1090 return true;
1091
1092 return false;
1093 }
1094
1095 /*
1096 * This function sends a single processed packet to firmware for
1097 * transmission.
1098 */
1099 static void
1100 mwifiex_send_processed_packet(struct mwifiex_private *priv,
1101 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1102 unsigned long ra_list_flags)
1103 __releases(&priv->wmm.ra_list_spinlock)
1104 {
1105 struct mwifiex_tx_param tx_param;
1106 struct mwifiex_adapter *adapter = priv->adapter;
1107 int ret = -1;
1108 struct sk_buff *skb, *skb_next;
1109 struct mwifiex_txinfo *tx_info;
1110
1111 if (skb_queue_empty(&ptr->skb_head)) {
1112 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1113 ra_list_flags);
1114 return;
1115 }
1116
1117 skb = skb_dequeue(&ptr->skb_head);
1118
1119 if (!skb_queue_empty(&ptr->skb_head))
1120 skb_next = skb_peek(&ptr->skb_head);
1121 else
1122 skb_next = NULL;
1123
1124 tx_info = MWIFIEX_SKB_TXCB(skb);
1125
1126 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1127
1128 if (adapter->iface_type == MWIFIEX_USB) {
1129 adapter->data_sent = true;
1130 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
1131 skb, NULL);
1132 } else {
1133 tx_param.next_pkt_len =
1134 ((skb_next) ? skb_next->len +
1135 sizeof(struct txpd) : 0);
1136 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1137 skb, &tx_param);
1138 }
1139
1140 switch (ret) {
1141 case -EBUSY:
1142 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
1143 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1144
1145 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1146 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1147 ra_list_flags);
1148 mwifiex_write_data_complete(adapter, skb, -1);
1149 return;
1150 }
1151
1152 skb_queue_tail(&ptr->skb_head, skb);
1153
1154 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1155 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1156 ra_list_flags);
1157 break;
1158 case -1:
1159 adapter->data_sent = false;
1160 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
1161 adapter->dbg.num_tx_host_to_card_failure++;
1162 mwifiex_write_data_complete(adapter, skb, ret);
1163 break;
1164 case -EINPROGRESS:
1165 adapter->data_sent = false;
1166 default:
1167 break;
1168 }
1169 if (ret != -EBUSY) {
1170 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1171 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1172 priv->wmm.packets_out[ptr_index]++;
1173 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1174 }
1175 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1176 list_first_entry(
1177 &adapter->bss_prio_tbl[priv->bss_priority]
1178 .bss_prio_cur->list,
1179 struct mwifiex_bss_prio_node,
1180 list);
1181 atomic_dec(&priv->wmm.tx_pkts_queued);
1182 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1183 ra_list_flags);
1184 }
1185 }
1186
1187 /*
1188 * This function dequeues a packet from the highest priority list
1189 * and transmits it.
1190 */
1191 static int
1192 mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1193 {
1194 struct mwifiex_ra_list_tbl *ptr;
1195 struct mwifiex_private *priv = NULL;
1196 int ptr_index = 0;
1197 u8 ra[ETH_ALEN];
1198 int tid_del = 0, tid = 0;
1199 unsigned long flags;
1200
1201 ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1202 if (!ptr)
1203 return -1;
1204
1205 tid = mwifiex_get_tid(ptr);
1206
1207 dev_dbg(adapter->dev, "data: tid=%d\n", tid);
1208
1209 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
1210 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1211 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1212 return -1;
1213 }
1214
1215 if (mwifiex_is_ptr_processed(priv, ptr)) {
1216 mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
1217 /* ra_list_spinlock has been freed in
1218 mwifiex_send_processed_packet() */
1219 return 0;
1220 }
1221
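/* Transmission strategy: the packet is sent on its own when 11n is
 * not enabled for this RA list, when a BA stream is already set up for
 * the TID, or when WPA/WPA2 is enabled but the GTK has not been
 * installed yet. Otherwise the driver may first initiate a BA stream
 * (deleting an existing one if no slot is free) and, if AMSDU is
 * allowed and enough data is queued, build an aggregate instead.
 */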
1222 if (!ptr->is_11n_enabled ||
1223 mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
1224 ((priv->sec_info.wpa_enabled ||
1225 priv->sec_info.wpa2_enabled) &&
1226 !priv->wpa_is_gtk_set)) {
1227 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1228 /* ra_list_spinlock has been freed in
1229 mwifiex_send_single_packet() */
1230 } else {
1231 if (mwifiex_is_ampdu_allowed(priv, tid)) {
1232 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1233 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1234 BA_SETUP_INPROGRESS);
1235 mwifiex_send_addba(priv, tid, ptr->ra);
1236 } else if (mwifiex_find_stream_to_delete
1237 (priv, tid, &tid_del, ra)) {
1238 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1239 BA_SETUP_INPROGRESS);
1240 mwifiex_send_delba(priv, tid_del, ra, 1);
1241 }
1242 }
1243 if (mwifiex_is_amsdu_allowed(priv, tid) &&
1244 mwifiex_is_11n_aggragation_possible(priv, ptr,
1245 adapter->tx_buf_size))
1246 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
1247 ptr_index, flags);
1248 /* ra_list_spinlock has been freed in
1249 mwifiex_11n_aggregate_pkt() */
1250 else
1251 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1252 /* ra_list_spinlock has been freed in
1253 mwifiex_send_single_packet() */
1254 }
1255 return 0;
1256 }
1257
1258 /*
1259  * This function transmits the highest priority packet waiting in the
1260  * WMM queues.
1261 */
1262 void
1263 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1264 {
1265 do {
1266 /* Check if busy */
1267 if (adapter->data_sent || adapter->tx_lock_flag)
1268 break;
1269
1270 if (mwifiex_dequeue_tx_packet(adapter))
1271 break;
1272 } while (!mwifiex_wmm_lists_empty(adapter));
1273 }