drivers/net/wireless/mwifiex/wmm.c
/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"

/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX 512

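/*
 * Tx queue depth watermarks. Not referenced in this file; elsewhere in
 * the driver these presumably gate when queueing of new Tx packets is
 * stopped (upper limit) and restarted (lower limit).
 */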
#define WMM_QUEUED_PACKET_LOWER_LIMIT 180

#define WMM_QUEUED_PACKET_UPPER_LIMIT 200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
				  0x00, 0x50, 0xf2, 0x02,
				  0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

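/*
 * ac_to_tid[ac] lists the two TIDs drained by each WMM AC:
 * BK -> {1, 2}, BE -> {0, 3}, VI -> {4, 5}, VO -> {6, 7}.
 */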
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a receiver address (RA) list node.
 *
 * The function also initializes the node with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is
 * initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u32 sec, usec;
	struct timeval ba_tstamp;
	u8 ba_threshold;

	/* Set ba_packet_threshold to a random number in the range
	 * [BA_SETUP_PACKET_OFFSET,
	 *  BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]
	 */
	do_gettimeofday(&ba_tstamp);
	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
		       + BA_SETUP_PACKET_OFFSET;

	return ba_threshold;
}

/*
 * This function allocates and adds a RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;
	unsigned long flags;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		mwifiex_dbg(adapter, INFO,
			    "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_get_tdls_link_status(priv, ra) ==
			    TDLS_SETUP_COMPLETE) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_irqsave(&priv->sta_list_spinlock, flags);
			node = mwifiex_get_sta_entry(priv, ra);
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
		}

		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
			    ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
				mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

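	/*
	 * Walk the ACs from highest priority (queue_priority[0]) down;
	 * each AC's TID pair fills tos_to_tid[] from the top (7, 6)
	 * toward the bottom (1, 0).
	 */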
	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM Parameter IE: version=%d,\t"
		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
		    wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		    wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		mwifiex_dbg(priv->adapter, INFO,
			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			    cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort: rank the ACs by ascending average back-off; on
	 * equal back-off the higher AC index wins, so the more important
	 * AC keeps the earlier slot.
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}

/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * If the AC is disabled, the highest enabled AC that does not require
 * admission control is returned.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Set up a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC
	 * that is enabled only because its admission control completed.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}

/*
 * This function sets up the WMM AC downgrade table.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
		    "BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, use the default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			mwifiex_dbg(priv->adapter, INFO,
				    "info: WMM: AC PRIO %d maps to %d\n",
				    ac_val,
				    priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
						    WMM_AC_BK,
						    WMM_AC_BK,
						    WMM_AC_BE,
						    WMM_AC_VI,
						    WMM_AC_VI,
						    WMM_AC_VO,
						    WMM_AC_VO
	};

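	/* 'tos' is the 802.1d user priority (0..7); out-of-range values
	 * fall back to best effort.
	 */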
	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * AC is disabled (due to admission control, i.e. the ACM bit). The
 * mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index into the tid array; picking from the array is
	 * handled by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

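		/* Never aggregate on TIDs 6 and 7 (voice); keeping them
		 * un-aggregated avoids holding back the highest-priority
		 * traffic.
		 */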
		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet-sent completion callback handlers are called with failure
 * status after the packets are dequeued, to ensure proper cleanup. The
 * RA list node itself is freed separately by its callers where needed.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
		mwifiex_write_data_complete(adapter, skb, 0, -1);
}

/*
 * This function deletes all packets in an RA list.
 *
 * Each node in the RA list is processed in turn, deleting all of its
 * queued packets.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv,
					       &priv->wmm.tid_tbl_ptr[i].ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function deletes all receiver address (RA) list nodes, for all
 * TIDs.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		mwifiex_dbg(priv->adapter, INFO,
			    "info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !priv->adapter->surprise_removed)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}

/*
 * This function retrieves a particular RA list node, matching the given
 * TID and RA address.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function deletes the RA list nodes of a given MAC address, for
 * all TIDs. It also decrements the Tx pending count accordingly.
 */
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	unsigned long flags;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			mwifiex_dbg(adapter, DATA,
				    "TDLS setup packet for %pM.\t"
				    "Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

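	/* skb->priority holds the 802.1d user priority (0..7) assigned
	 * when the packet was classified for transmission.
	 */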
	tid = skb->priority;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In infrastructure mode the RA list was already created during
	 * association, so get_queue_raptr() need not be called; there is
	 * only one RA pointer per TID in that case.
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       flags);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
				ra_list = list_first_entry(
					&list_head, struct mwifiex_ra_list_tbl,
					list);
			else
				ra_list = NULL;
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		    resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "QSTATUS TLV: %d, %d, %d\n",
				    tlv_wmm_qstatus->queue_index,
				    tlv_wmm_qstatus->flow_required,
				    tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
				tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and set up the IEEE IE type and length byte fields
			 */
			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "WMM Parameter Set Count: %d\n",
				    wmm_param_ie->qos_info_bitmap & mask);

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		    wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
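		/* If the AP honors U-APSD, overwrite the IE's trailing
		 * QoS info byte with our own U-APSD configuration.
		 */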
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
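	/* e.g. a 300 ms delay with drv_pkt_delay_max = 512 is reported
	 * to firmware as 150 (i.e. 150 * 2 ms).
	 */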
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
		    "%d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}

/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
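	/* Every queue above priority i turned out to be empty, so lower
	 * the cached high-water mark; the next scan can start at i.
	 */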
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This function rotates the RA and BSS lists so packets are picked in a
 * round-robin fashion.
 *
 * After a packet is successfully transmitted, rotate the RA list so that
 * the RA next to the one just served comes first in the list. The same
 * applies to BSS nodes of equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

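	/* Aggregation pays off only if at least MIN_NUM_AMSDU packets
	 * fit within the A-MSDU size limit.
	 */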
	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	mwifiex_dbg(adapter, DATA,
		    "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

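	/* Hint the bus driver about the size of the packet that follows,
	 * so it can decide whether to aggregate transfers.
	 */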
	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				 sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
			      __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (adapter->data_sent || adapter->tx_lock_flag) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
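		/* fall through */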
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    ptr->ba_status ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    ptr->ba_status &&
		    ptr->amsdu_in_ampdu &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}
	return 0;
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
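	/* Keep dequeuing until the WMM lists are empty, or the interface
	 * back-pressures: bus busy for non-SDIO, Tx queue full for SDIO.
	 */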
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}