/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"


/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512


#define WMM_QUEUED_PACKET_LOWER_LIMIT	180

#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);
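
/*
 * Example usage: "modprobe mwifiex disable_tx_amsdu=1" disables Tx A-MSDU
 * aggregation at load time. Since the parameter is registered with mode
 * 0644, it can also be toggled at runtime through
 * /sys/module/mwifiex/parameters/disable_tx_amsdu.
 */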

/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
				  0x00, 0x50, 0xf2, 0x02,
				  0x00, 0x01, 0x00
};
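
/*
 * Byte layout of the IE above: element ID (vendor specific, 221) and
 * length 7, followed by the Microsoft OUI 00:50:f2, OUI type 2 (WMM),
 * OUI subtype 0 (information element) and version 1, ending in a QoS
 * info field of 0. The QoS info byte is overwritten with
 * priv->wmm_qosinfo when U-APSD is negotiated; see
 * mwifiex_wmm_process_association_req() below.
 */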

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
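
/*
 * Each WMM AC owns two TIDs: ac_to_tid[WMM_AC_BK] = {1, 2},
 * ac_to_tid[WMM_AC_BE] = {0, 3}, ac_to_tid[WMM_AC_VI] = {4, 5} and
 * ac_to_tid[WMM_AC_VO] = {6, 7}, following the IEEE 802.1D user priority
 * to access category mapping. It is effectively the inverse of the
 * default tos_to_tid table above, which is rewritten at runtime by
 * mwifiex_wmm_queue_priorities_tid().
 */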

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a receiver address (RA) list node.
 *
 * The function also initializes the node with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is
 * initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u32 sec, usec;
	struct timeval ba_tstamp;
	u8 ba_threshold;

	/* Set ba_packet_threshold to a pseudo-random number in the range
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
	 */

	do_gettimeofday(&ba_tstamp);
	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
		       + BA_SETUP_PACKET_OFFSET;

	return ba_threshold;
}
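
/*
 * Note on the computation above: the seconds and microseconds of the
 * current time are each folded from 32 to 16 bits, recombined into one
 * 32-bit word and reduced modulo BA_SETUP_MAX_PACKET_THRESHOLD. This is
 * cheap jitter rather than a real RNG; it only spreads BA setup across
 * RA lists so they do not all trigger ADDBA at the same packet count.
 */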

/*
 * This function allocates and adds a RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
	node = mwifiex_get_sta_entry(priv, ra);
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_get_tdls_link_status(priv, ra) ==
			    TDLS_SETUP_COMPLETE) {
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
		}

		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
			ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		dev_dbg(priv->adapter->dev,
			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}
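
/*
 * After the bubble sort above, queue_priority[] is ordered by increasing
 * average back-off, i.e. queue_priority[0] holds the AC expected to win
 * channel access most often. Ties are broken in favour of the nominally
 * higher AC: with equal back-off for VI and VO, VO is placed before VI.
 */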

/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * If the AC is disabled, the highest enabled AC that does not
 * require admission control is returned.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Set up a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}

/*
 * This function downgrades the WMM priority queues.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities: "
		"BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			dev_dbg(priv->adapter->dev,
				"info: WMM: AC PRIO %d maps to %d\n",
				ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
						    WMM_AC_BK,
						    WMM_AC_BK,
						    WMM_AC_BE,
						    WMM_AC_VI,
						    WMM_AC_VI,
						    WMM_AC_VO,
						    WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}
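
/*
 * The tos_to_ac table above is the standard IEEE 802.1D user priority to
 * access category mapping: UP 0 and 3 -> best effort, UP 1 and 2 ->
 * background, UP 4 and 5 -> video, UP 6 and 7 -> voice. Out-of-range
 * values fall back to best effort.
 */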

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * corresponding AC is disabled (due to the admission control mandatory
 * (ACM) bit). The mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Return the index into the tid array; picking from the array
	 * is taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}
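
/*
 * Worked example: suppose the AP sets the ACM bit on AC_VO and the
 * evaluated downgrade lands on AC_BE. A packet with TID 6 (voice) then
 * resolves to ac_down = WMM_AC_BE, and new_tid =
 * ac_to_tid[WMM_AC_BE][6 % 2] = ac_to_tid[1][0] = 0, i.e. the frame is
 * queued as best effort.
 */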

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callback handler is called with failure
 * status for each packet after it is dequeued, to ensure proper cleanup.
 * The RA list node itself is freed separately, in
 * mwifiex_wmm_delete_all_ralist().
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
		mwifiex_write_data_complete(adapter, skb, 0, -1);
}

/*
 * This function deletes all packets in an RA list.
 *
 * Each node in the RA list is processed individually and the packets
 * queued on it are flushed.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
								       ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function frees all RA list nodes in all TID tables.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		dev_dbg(priv->adapter->dev,
			"info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !priv->adapter->surprise_removed)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	unsigned long flags;
	struct list_head *list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			dev_dbg(adapter->dev,
				"TDLS setup packet for %pM. Don't block\n", ra);
		else
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In infrastructure mode the RA list was already created during
	 * association, so get_queue_raptr need not be called; there is
	 * only one RA pointer per TID in that case.
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       flags);
			return;
		default:
			/* Use the list head in place; a stack copy of the
			 * struct list_head would break list_empty() for an
			 * empty list.
			 */
			list_head = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			if (!list_empty(list_head))
				ra_list = list_first_entry(
					list_head, struct mwifiex_ra_list_tbl,
					list);
			else
				ra_list = NULL;
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			memset(ra, 0xff, ETH_ALEN);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue prioritization or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" QSTATUS TLV: %d, %d, %d\n",
				tlv_wmm_qstatus->queue_index,
				tlv_wmm_qstatus->flow_required,
				tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and set up the IEEE IE type and length byte fields
			 */

			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr +
								     2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" WMM Parameter Set Count: %d\n",
				wmm_param_ie->qos_info_bitmap &
				IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	dev_dbg(priv->adapter->dev,
		"info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u8 ret_val;
	struct timeval out_tstamp, in_tstamp;
	u32 queue_delay;

	do_gettimeofday(&out_tstamp);
	in_tstamp = ktime_to_timeval(skb->tstamp);

	queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
	queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
		" %d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}
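
/*
 * Worked example: a packet that sat 120 ms in the driver queues gives
 * queue_delay = 120. Assuming drv_pkt_delay_max is at least 120 (the
 * file defines DRV_PKT_DELAY_TO_FW_MAX as 512 for this purpose), the
 * value sent to firmware is 120 >> 1 = 60, i.e. 60 units of 2 ms each.
 */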

/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_bss, flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
				  flags_bss);

		/* iterate over BSSes with equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}

		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
				       flags_bss);
	}

	return NULL;

found:
	/* holds bss_prio_lock / ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
			       flags_bss);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}
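
/*
 * Locking note: bss_prio_lock is always taken before ra_list_spinlock,
 * and both are held when the "found" label is reached. The hqp value is
 * lowered to the index that actually produced a packet, so the next
 * search can start from a tighter upper bound.
 */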

/* This function rotates the RA and BSS lists so packets are picked
 * round robin.
 *
 * After a packet is successfully transmitted, rotate the RA list, so that
 * the RA next to the one transmitted comes first in the list. This way we
 * pick the RAs in a round robin fashion. The same applies to BSS nodes of
 * equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * the curr bss node. Imagine the list staying fixed while the
	 * head is moved.
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggregation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}
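
/*
 * In other words, A-MSDU aggregation is worthwhile only when at least
 * MIN_NUM_AMSDU queued packets fit within max_amsdu_size together; a
 * queue whose combined length exceeds the limit before reaching that
 * count sends its packets unaggregated.
 */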

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				 sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back into the RA list */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}
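
/*
 * The __releases() annotation above is a sparse hint: the caller enters
 * with priv->wmm.ra_list_spinlock held (its flags in ra_list_flags) and
 * the function drops that lock on every path before returning. The same
 * contract applies to mwifiex_send_processed_packet() below.
 */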

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		/* fall through */
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	dev_dbg(adapter->dev, "data: tid=%d\n", tid);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been released in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
		    mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggregation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been released in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been released in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggregation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been released in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been released in
			   mwifiex_send_single_packet() */
	}
	return 0;
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		/* Check if busy */
		if (adapter->data_sent || adapter->tx_lock_flag)
			break;

		if (mwifiex_dequeue_tx_packet(adapter))
			break;
	} while (!mwifiex_wmm_lists_empty(adapter));
}