1 /*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011-2014, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20 #include "decl.h"
21 #include "ioctl.h"
22 #include "util.h"
23 #include "fw.h"
24 #include "main.h"
25 #include "wmm.h"
26 #include "11n.h"
27
28
29 /* Maximum value FW can accept for driver delay in packet transmission */
30 #define DRV_PKT_DELAY_TO_FW_MAX 512
31
32
33 #define WMM_QUEUED_PACKET_LOWER_LIMIT 180
34
35 #define WMM_QUEUED_PACKET_UPPER_LIMIT 200
36
37 /* Offset for TOS field in the IP header */
38 #define IPTOS_OFFSET 5
39
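/* Module parameter: when true, Tx A-MSDU aggregation is disabled (0644: changeable at runtime) */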
40 static bool disable_tx_amsdu;
41 module_param(disable_tx_amsdu, bool, 0644);
42
43 /* WMM Information IE: Microsoft OUI 00:50:f2, OUI type 2 (WMM), subtype 0, version 1 */
44 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
45 0x00, 0x50, 0xf2, 0x02,
46 0x00, 0x01, 0x00
47 };
48
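/* Map from the WMM IE ACI field value (0=BE, 1=BK, 2=VI, 3=VO) to the driver AC index */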
49 static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
50 WMM_AC_BK,
51 WMM_AC_VI,
52 WMM_AC_VO
53 };
54
55 static u8 tos_to_tid[] = {
56 /* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
57 0x01, /* 0 1 0 AC_BK */
58 0x02, /* 0 0 0 AC_BK */
59 0x00, /* 0 0 1 AC_BE */
60 0x03, /* 0 1 1 AC_BE */
61 0x04, /* 1 0 0 AC_VI */
62 0x05, /* 1 0 1 AC_VI */
63 0x06, /* 1 1 0 AC_VO */
64 0x07 /* 1 1 1 AC_VO */
65 };
66
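/* The two 802.1d TIDs belonging to each WMM AC, indexed in BK, BE, VI, VO order */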
67 static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
68
69 /*
70 * This function debug prints the priority parameters for a WMM AC.
71 */
72 static void
73 mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
74 {
75 const char *ac_str[] = { "BK", "BE", "VI", "VO" };
76
77 pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
78 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
79 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
80 & MWIFIEX_ACI) >> 5]],
81 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
82 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
83 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
84 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
85 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
86 le16_to_cpu(ac_param->tx_op_limit));
87 }
88
89 /*
90 * This function allocates a receiver address (RA) list node.
91 *
92 * The function also initializes the list with the provided RA.
93 */
94 static struct mwifiex_ra_list_tbl *
95 mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
96 {
97 struct mwifiex_ra_list_tbl *ra_list;
98
99 ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
100 if (!ra_list)
101 return NULL;
102
103 INIT_LIST_HEAD(&ra_list->list);
104 skb_queue_head_init(&ra_list->skb_head);
105
106 memcpy(ra_list->ra, ra, ETH_ALEN);
107
108 ra_list->total_pkt_count = 0;
109
110 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
111
112 return ra_list;
113 }
114
115 /* This function returns a random number between 16 and 32, used as the
116 * threshold for the number of packets after which BA setup is initiated.
117 */
118 static u8 mwifiex_get_random_ba_threshold(void)
119 {
120 u32 sec, usec;
121 struct timeval ba_tstamp;
122 u8 ba_threshold;
123
124 /* set up ba_packet_threshold here as a random number in the range
125 * [BA_SETUP_PACKET_OFFSET,
126 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
127 */
128
129 do_gettimeofday(&ba_tstamp);
130 sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
131 usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
132 ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
133 + BA_SETUP_PACKET_OFFSET;
134
135 return ba_threshold;
136 }
137
138 /*
139 * This function allocates and adds an RA list for all TIDs
140 * with the given RA.
141 */
142 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
143 {
144 int i;
145 struct mwifiex_ra_list_tbl *ra_list;
146 struct mwifiex_adapter *adapter = priv->adapter;
147 struct mwifiex_sta_node *node;
148 unsigned long flags;
149
150
151 for (i = 0; i < MAX_NUM_TID; ++i) {
152 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
153 dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
154
155 if (!ra_list)
156 break;
157
158 ra_list->is_11n_enabled = 0;
159 ra_list->tdls_link = false;
160 ra_list->ba_status = BA_SETUP_NONE;
161 ra_list->amsdu_in_ampdu = false;
162 if (!mwifiex_queuing_ra_based(priv)) {
163 if (mwifiex_get_tdls_link_status(priv, ra) ==
164 TDLS_SETUP_COMPLETE) {
165 ra_list->tdls_link = true;
166 ra_list->is_11n_enabled =
167 mwifiex_tdls_peer_11n_enabled(priv, ra);
168 } else {
169 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
170 }
171 } else {
172 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
173 node = mwifiex_get_sta_entry(priv, ra);
174 ra_list->is_11n_enabled =
175 mwifiex_is_sta_11n_enabled(priv, node);
176 if (ra_list->is_11n_enabled)
177 ra_list->max_amsdu = node->max_amsdu;
178 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
179 }
180
181 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
182 ra_list, ra_list->is_11n_enabled);
183
184 if (ra_list->is_11n_enabled) {
185 ra_list->ba_pkt_count = 0;
186 ra_list->ba_packet_thr =
187 mwifiex_get_random_ba_threshold();
188 }
189 list_add_tail(&ra_list->list,
190 &priv->wmm.tid_tbl_ptr[i].ra_list);
191 }
192 }
193
194 /*
195 * This function sets the WMM queue priorities to their default values.
196 */
197 static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
198 {
199 /* Default queue priorities: VO->VI->BE->BK */
200 priv->wmm.queue_priority[0] = WMM_AC_VO;
201 priv->wmm.queue_priority[1] = WMM_AC_VI;
202 priv->wmm.queue_priority[2] = WMM_AC_BE;
203 priv->wmm.queue_priority[3] = WMM_AC_BK;
204 }
205
206 /*
207 * This function maps ACs to TIDs.
208 */
209 static void
210 mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
211 {
212 struct mwifiex_wmm_desc *wmm = &priv->wmm;
213 u8 *queue_priority = wmm->queue_priority;
214 int i;
215
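/* Rebuild tos_to_tid: the highest-priority AC fills slots 7 and 6,
 * the lowest-priority AC fills slots 1 and 0 */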
216 for (i = 0; i < 4; ++i) {
217 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
218 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
219 }
220
221 for (i = 0; i < MAX_NUM_TID; ++i)
222 priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
223
224 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
225 }
226
227 /*
228 * This function initializes WMM priority queues.
229 */
230 void
231 mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
232 struct ieee_types_wmm_parameter *wmm_ie)
233 {
234 u16 cw_min, avg_back_off, tmp[4];
235 u32 i, j, num_ac;
236 u8 ac_idx;
237
238 if (!wmm_ie || !priv->wmm_enabled) {
239 /* WMM is not enabled, just set the defaults and return */
240 mwifiex_wmm_default_queue_priorities(priv);
241 return;
242 }
243
244 dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
245 "qos_info Parameter Set Count=%d, Reserved=%#x\n",
246 wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
247 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
248 wmm_ie->reserved);
249
250 for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
251 u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
252 u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
253 cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
254 avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
255
256 ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
257 priv->wmm.queue_priority[ac_idx] = ac_idx;
258 tmp[ac_idx] = avg_back_off;
259
260 dev_dbg(priv->adapter->dev,
261 "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
262 (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
263 cw_min, avg_back_off);
264 mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
265 }
266
267 /* Bubble-sort the ACs by ascending average back-off; on ties, the higher AC index wins */
268 for (i = 0; i < num_ac; i++) {
269 for (j = 1; j < num_ac - i; j++) {
270 if (tmp[j - 1] > tmp[j]) {
271 swap(tmp[j - 1], tmp[j]);
272 swap(priv->wmm.queue_priority[j - 1],
273 priv->wmm.queue_priority[j]);
274 } else if (tmp[j - 1] == tmp[j]) {
275 if (priv->wmm.queue_priority[j - 1]
276 < priv->wmm.queue_priority[j])
277 swap(priv->wmm.queue_priority[j - 1],
278 priv->wmm.queue_priority[j]);
279 }
280 }
281 }
282
283 mwifiex_wmm_queue_priorities_tid(priv);
284 }
285
286 /*
287 * This function evaluates whether or not an AC is to be downgraded.
288 *
289 * If the given AC is disabled, the highest AC that is enabled and does
290 * not require admission control is returned instead.
291 */
292 static enum mwifiex_wmm_ac_e
293 mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
294 enum mwifiex_wmm_ac_e eval_ac)
295 {
296 int down_ac;
297 enum mwifiex_wmm_ac_e ret_ac;
298 struct mwifiex_wmm_ac_status *ac_status;
299
300 ac_status = &priv->wmm.ac_status[eval_ac];
301
302 if (!ac_status->disabled)
303 /* Okay to use this AC, it's enabled */
304 return eval_ac;
305
306 /* Set up a default return value of the lowest priority */
307 ret_ac = WMM_AC_BK;
308
309 /*
310 * Find the highest AC that is enabled and does not require
311 * admission control. The spec disallows downgrading to an AC,
312 * which is enabled due to a completed admission control.
313 * Unadmitted traffic is not to be sent on an AC with admitted
314 * traffic.
315 */
316 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
317 ac_status = &priv->wmm.ac_status[down_ac];
318
319 if (!ac_status->disabled && !ac_status->flow_required)
320 /* AC is enabled and does not require admission
321 control */
322 ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
323 }
324
325 return ret_ac;
326 }
327
328 /*
329 * This function sets up the AC downgrade mapping for the WMM priority queues.
330 */
331 void
332 mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
333 {
334 int ac_val;
335
336 dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
337 "BK(0), BE(1), VI(2), VO(3)\n");
338
339 if (!priv->wmm_enabled) {
340 /* WMM is not enabled, default priorities */
341 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
342 priv->wmm.ac_down_graded_vals[ac_val] =
343 (enum mwifiex_wmm_ac_e) ac_val;
344 } else {
345 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
346 priv->wmm.ac_down_graded_vals[ac_val]
347 = mwifiex_wmm_eval_downgrade_ac(priv,
348 (enum mwifiex_wmm_ac_e) ac_val);
349 dev_dbg(priv->adapter->dev,
350 "info: WMM: AC PRIO %d maps to %d\n",
351 ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
352 }
353 }
354 }
355
356 /*
357 * This function converts the IP TOS field to a WMM AC
358 * queue assignment.
359 */
360 static enum mwifiex_wmm_ac_e
361 mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
362 {
363 /* Map of TOS UP values to WMM AC */
364 const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
365 WMM_AC_BK,
366 WMM_AC_BK,
367 WMM_AC_BE,
368 WMM_AC_VI,
369 WMM_AC_VI,
370 WMM_AC_VO,
371 WMM_AC_VO
372 };
373
374 if (tos >= ARRAY_SIZE(tos_to_ac))
375 return WMM_AC_BE;
376
377 return tos_to_ac[tos];
378 }
379
380 /*
381 * This function evaluates a given TID and downgrades it to a lower
382 * TID if the WMM Parameter IE received from the AP indicates that the
383 * corresponding AC is disabled (due to the call admission control ACM
384 * bit). Mapping of TID to AC is taken care of internally.
385 */
386 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
387 {
388 enum mwifiex_wmm_ac_e ac, ac_down;
389 u8 new_tid;
390
391 ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
392 ac_down = priv->wmm.ac_down_graded_vals[ac];
393
394 /* Return an index into the tid array; picking from the array is
395 * taken care of by the dequeuing function
396 */
397 new_tid = ac_to_tid[ac_down][tid % 2];
398
399 return new_tid;
400 }
401
402 /*
403 * This function initializes the WMM state information and the
404 * WMM data path queues.
405 */
406 void
407 mwifiex_wmm_init(struct mwifiex_adapter *adapter)
408 {
409 int i, j;
410 struct mwifiex_private *priv;
411
412 for (j = 0; j < adapter->priv_num; ++j) {
413 priv = adapter->priv[j];
414 if (!priv)
415 continue;
416
417 for (i = 0; i < MAX_NUM_TID; ++i) {
418 if (!disable_tx_amsdu &&
419 adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
420 priv->aggr_prio_tbl[i].amsdu =
421 priv->tos_to_tid_inv[i];
422 else
423 priv->aggr_prio_tbl[i].amsdu =
424 BA_STREAM_NOT_ALLOWED;
425 priv->aggr_prio_tbl[i].ampdu_ap =
426 priv->tos_to_tid_inv[i];
427 priv->aggr_prio_tbl[i].ampdu_user =
428 priv->tos_to_tid_inv[i];
429 }
430
431 mwifiex_set_ba_params(priv);
432 mwifiex_reset_11n_rx_seq_num(priv);
433
434 atomic_set(&priv->wmm.tx_pkts_queued, 0);
435 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
436 }
437 }
438
439 /*
440 * This function checks if the WMM Tx queues are empty.
441 */
442 int
443 mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
444 {
445 int i;
446 struct mwifiex_private *priv;
447
448 for (i = 0; i < adapter->priv_num; ++i) {
449 priv = adapter->priv[i];
450 if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
451 return false;
452 }
453
454 return true;
455 }
456
457 /*
458 * This function deletes all packets in an RA list node.
459 *
460 * The packet send completion callback handlers are called with
461 * status failure after the packets are dequeued, to ensure proper
462 * cleanup. The RA list node itself is not freed here.
463 */
464 static void
465 mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
466 struct mwifiex_ra_list_tbl *ra_list)
467 {
468 struct mwifiex_adapter *adapter = priv->adapter;
469 struct sk_buff *skb, *tmp;
470
471 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
472 mwifiex_write_data_complete(adapter, skb, 0, -1);
473 }
474
475 /*
476 * This function deletes all packets in an RA list.
477 *
478 * The packets queued on each node in the RA list are deleted
479 * individually; the nodes themselves are freed separately.
480 */
481 static void
482 mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
483 struct list_head *ra_list_head)
484 {
485 struct mwifiex_ra_list_tbl *ra_list;
486
487 list_for_each_entry(ra_list, ra_list_head, list)
488 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
489 }
490
491 /*
492 * This function deletes all packets in all RA lists.
493 */
494 static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
495 {
496 int i;
497
498 for (i = 0; i < MAX_NUM_TID; i++)
499 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
500 ra_list);
501
502 atomic_set(&priv->wmm.tx_pkts_queued, 0);
503 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
504 }
505
506 /*
507 * This function deletes all route addresses from all RA lists.
508 */
509 static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
510 {
511 struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
512 int i;
513
514 for (i = 0; i < MAX_NUM_TID; ++i) {
515 dev_dbg(priv->adapter->dev,
516 "info: ra_list: freeing buf for tid %d\n", i);
517 list_for_each_entry_safe(ra_list, tmp_node,
518 &priv->wmm.tid_tbl_ptr[i].ra_list,
519 list) {
520 list_del(&ra_list->list);
521 kfree(ra_list);
522 }
523
524 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
525 }
526 }
527
528 static int mwifiex_free_ack_frame(int id, void *p, void *data)
529 {
530 pr_warn("Have pending ack frames!\n");
531 kfree_skb(p);
532 return 0;
533 }
534
535 /*
536 * This function cleans up the Tx and Rx queues.
537 *
538 * Cleanup includes -
539 * - All packets in RA lists
540 * - All entries in Rx reorder table
541 * - All entries in Tx BA stream table
542 * - MPA buffer (if required)
543 * - All RA lists
544 */
545 void
546 mwifiex_clean_txrx(struct mwifiex_private *priv)
547 {
548 unsigned long flags;
549 struct sk_buff *skb, *tmp;
550
551 mwifiex_11n_cleanup_reorder_tbl(priv);
552 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
553
554 mwifiex_wmm_cleanup_queues(priv);
555 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
556
557 if (priv->adapter->if_ops.cleanup_mpa_buf)
558 priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
559
560 mwifiex_wmm_delete_all_ralist(priv);
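/* Restore the default tos_to_tid mapping (the flattened ac_to_tid table equals the defaults) */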
561 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
562
563 if (priv->adapter->if_ops.clean_pcie_ring &&
564 !priv->adapter->surprise_removed)
565 priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
566 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
567
568 skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
569 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
570
571 idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
572 idr_destroy(&priv->ack_status_frames);
573 }
574
575 /*
576 * This function retrieves a particular RA list node, matching the
577 * given TID and RA address.
578 */
579 struct mwifiex_ra_list_tbl *
580 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
581 const u8 *ra_addr)
582 {
583 struct mwifiex_ra_list_tbl *ra_list;
584
585 list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
586 list) {
587 if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
588 return ra_list;
589 }
590
591 return NULL;
592 }
593
594 /*
595 * This function retrieves an RA list node for a given TID and
596 * RA address pair.
597 *
598 * If no such node is found, a new node is added first and then
599 * retrieved.
600 */
601 struct mwifiex_ra_list_tbl *
602 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
603 const u8 *ra_addr)
604 {
605 struct mwifiex_ra_list_tbl *ra_list;
606
607 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
608 if (ra_list)
609 return ra_list;
610 mwifiex_ralist_add(priv, ra_addr);
611
612 return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
613 }
614
615 /*
616 * This function deletes the RA list nodes for the given MAC address for all TIDs.
617 * It also decrements the TX pending count accordingly.
618 */
619 void
620 mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
621 {
622 struct mwifiex_ra_list_tbl *ra_list;
623 unsigned long flags;
624 int i;
625
626 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
627
628 for (i = 0; i < MAX_NUM_TID; ++i) {
629 ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
630
631 if (!ra_list)
632 continue;
633 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
634 atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
635 list_del(&ra_list->list);
636 kfree(ra_list);
637 }
638 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
639 }
640
641 /*
642 * This function checks if a particular RA list node exists in a given TID
643 * table index.
644 */
645 int
646 mwifiex_is_ralist_valid(struct mwifiex_private *priv,
647 struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
648 {
649 struct mwifiex_ra_list_tbl *rlist;
650
651 list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
652 list) {
653 if (rlist == ra_list)
654 return true;
655 }
656
657 return false;
658 }
659
660 /*
661 * This function adds a packet to WMM queue.
662 *
663 * In disconnected state the packet is immediately dropped and the
664 * packet send completion callback is called with status failure.
665 *
666 * Otherwise, the correct RA list node is located and the packet
667 * is queued at the list tail.
668 */
669 void
670 mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
671 struct sk_buff *skb)
672 {
673 struct mwifiex_adapter *adapter = priv->adapter;
674 u32 tid;
675 struct mwifiex_ra_list_tbl *ra_list;
676 u8 ra[ETH_ALEN], tid_down;
677 unsigned long flags;
678 struct list_head list_head;
679 int tdls_status = TDLS_NOT_SETUP;
680 struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
681 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
682
683 memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
684
685 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
686 ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
687 if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
688 dev_dbg(adapter->dev,
689 "TDLS setup packet for %pM. Don't block\n", ra);
690 else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
691 tdls_status = mwifiex_get_tdls_link_status(priv, ra);
692 }
693
694 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
695 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
696 mwifiex_write_data_complete(adapter, skb, 0, -1);
697 return;
698 }
699
700 tid = skb->priority;
701
702 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
703
704 tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
705
706 /* In case of infra, the list was already created during association,
707 so we don't have to call get_queue_raptr; there will be only
708 one raptr for a tid in case of infra */
709 if (!mwifiex_queuing_ra_based(priv) &&
710 !mwifiex_is_skb_mgmt_frame(skb)) {
711 switch (tdls_status) {
712 case TDLS_SETUP_COMPLETE:
713 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
714 ra);
715 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
716 break;
717 case TDLS_SETUP_INPROGRESS:
718 skb_queue_tail(&priv->tdls_txq, skb);
719 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
720 flags);
721 return;
722 default:
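/* Not a TDLS peer: use the infra RA list created at association for this TID */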
723 list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
724 if (!list_empty(&list_head))
725 ra_list = list_first_entry(
726 &list_head, struct mwifiex_ra_list_tbl,
727 list);
728 else
729 ra_list = NULL;
730 break;
731 }
732 } else {
733 memcpy(ra, skb->data, ETH_ALEN);
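/* Multicast/broadcast destinations and mgmt frames are queued on the broadcast RA list */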
734 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
735 eth_broadcast_addr(ra);
736 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
737 }
738
739 if (!ra_list) {
740 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
741 mwifiex_write_data_complete(adapter, skb, 0, -1);
742 return;
743 }
744
745 skb_queue_tail(&ra_list->skb_head, skb);
746
747 ra_list->ba_pkt_count++;
748 ra_list->total_pkt_count++;
749
750 if (atomic_read(&priv->wmm.highest_queued_prio) <
751 priv->tos_to_tid_inv[tid_down])
752 atomic_set(&priv->wmm.highest_queued_prio,
753 priv->tos_to_tid_inv[tid_down]);
754
755 atomic_inc(&priv->wmm.tx_pkts_queued);
756
757 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
758 }
759
760 /*
761 * This function processes the get WMM status command response from firmware.
762 *
763 * The response may contain multiple TLVs -
764 * - AC Queue status TLVs
765 * - Current WMM Parameter IE TLV
766 * - Admission Control action frame TLVs
767 *
768 * This function parses the TLVs and then calls further specific functions
769 * to process any changes in the queue priorities or state.
770 */
771 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
772 const struct host_cmd_ds_command *resp)
773 {
774 u8 *curr = (u8 *) &resp->params.get_wmm_status;
775 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
776 bool valid = true;
777
778 struct mwifiex_ie_types_data *tlv_hdr;
779 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
780 struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
781 struct mwifiex_wmm_ac_status *ac_status;
782
783 dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
784 resp_len);
785
786 while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
787 tlv_hdr = (struct mwifiex_ie_types_data *) curr;
788 tlv_len = le16_to_cpu(tlv_hdr->header.len);
789
790 if (resp_len < tlv_len + sizeof(tlv_hdr->header))
791 break;
792
793 switch (le16_to_cpu(tlv_hdr->header.type)) {
794 case TLV_TYPE_WMMQSTATUS:
795 tlv_wmm_qstatus =
796 (struct mwifiex_ie_types_wmm_queue_status *)
797 tlv_hdr;
798 dev_dbg(priv->adapter->dev,
799 "info: CMD_RESP: WMM_GET_STATUS:"
800 " QSTATUS TLV: %d, %d, %d\n",
801 tlv_wmm_qstatus->queue_index,
802 tlv_wmm_qstatus->flow_required,
803 tlv_wmm_qstatus->disabled);
804
805 ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
806 queue_index];
807 ac_status->disabled = tlv_wmm_qstatus->disabled;
808 ac_status->flow_required =
809 tlv_wmm_qstatus->flow_required;
810 ac_status->flow_created = tlv_wmm_qstatus->flow_created;
811 break;
812
813 case WLAN_EID_VENDOR_SPECIFIC:
814 /*
815 * Point the regular IEEE IE 2 bytes into the Marvell IE
816 * and set up the IEEE IE type and length byte fields
817 */
818
819 wmm_param_ie =
820 (struct ieee_types_wmm_parameter *) (curr +
821 2);
822 wmm_param_ie->vend_hdr.len = (u8) tlv_len;
823 wmm_param_ie->vend_hdr.element_id =
824 WLAN_EID_VENDOR_SPECIFIC;
825
826 dev_dbg(priv->adapter->dev,
827 "info: CMD_RESP: WMM_GET_STATUS:"
828 " WMM Parameter Set Count: %d\n",
829 wmm_param_ie->qos_info_bitmap &
830 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
831
832 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
833 wmm_ie, wmm_param_ie,
834 wmm_param_ie->vend_hdr.len + 2);
835
836 break;
837
838 default:
839 valid = false;
840 break;
841 }
842
843 curr += (tlv_len + sizeof(tlv_hdr->header));
844 resp_len -= (tlv_len + sizeof(tlv_hdr->header));
845 }
846
847 mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
848 mwifiex_wmm_setup_ac_downgrade(priv);
849
850 return 0;
851 }
852
853 /*
854 * Callback handler from the command module to allow insertion of a WMM TLV.
855 *
856 * If the BSS we are associating with supports WMM, this function adds the
857 * required WMM Information IE to the association request command buffer in
858 * the form of a Marvell extended IEEE IE.
859 */
860 u32
861 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
862 u8 **assoc_buf,
863 struct ieee_types_wmm_parameter *wmm_ie,
864 struct ieee80211_ht_cap *ht_cap)
865 {
866 struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
867 u32 ret_len = 0;
868
869 /* Null checks */
870 if (!assoc_buf)
871 return 0;
872 if (!(*assoc_buf))
873 return 0;
874
875 if (!wmm_ie)
876 return 0;
877
878 dev_dbg(priv->adapter->dev,
879 "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
880 wmm_ie->vend_hdr.element_id);
881
882 if ((priv->wmm_required ||
883 (ht_cap && (priv->adapter->config_bands & BAND_GN ||
884 priv->adapter->config_bands & BAND_AN))) &&
885 wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
886 wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
887 wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
888 wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
889 memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
890 le16_to_cpu(wmm_tlv->header.len));
891 if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
892 memcpy((u8 *) (wmm_tlv->wmm_ie
893 + le16_to_cpu(wmm_tlv->header.len)
894 - sizeof(priv->wmm_qosinfo)),
895 &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));
896
897 ret_len = sizeof(wmm_tlv->header)
898 + le16_to_cpu(wmm_tlv->header.len);
899
900 *assoc_buf += ret_len;
901 }
902
903 return ret_len;
904 }
905
906 /*
907 * This function computes the time delay in the driver queues for a
908 * given packet.
909 *
910 * When the packet is received at the OS/Driver interface, the current
911 * time is set in the packet structure. The difference between the present
912 * time and the receive time is computed in this function and limited
913 * based on pre-compiled limits in the driver.
914 */
915 u8
916 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
917 const struct sk_buff *skb)
918 {
919 u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
920 u8 ret_val;
921
922 /*
923 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
924 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
925 *
926 * Pass max value if queue_delay is beyond the uint8 range
927 */
928 ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
929
930 dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
931 " %d ms sent to FW\n", queue_delay, ret_val);
932
933 return ret_val;
934 }
935
936 /*
937 * This function retrieves the highest priority RA list table pointer.
938 */
939 static struct mwifiex_ra_list_tbl *
940 mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
941 struct mwifiex_private **priv, int *tid)
942 {
943 struct mwifiex_private *priv_tmp;
944 struct mwifiex_ra_list_tbl *ptr;
945 struct mwifiex_tid_tbl *tid_ptr;
946 atomic_t *hqp;
947 unsigned long flags_ra;
948 int i, j;
949
950 /* check the BSS with highest priority first */
951 for (j = adapter->priv_num - 1; j >= 0; --j) {
952 /* iterate over BSSes with equal priority */
953 list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
954 &adapter->bss_prio_tbl[j].bss_prio_head,
955 list) {
956
957 priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
958
959 if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
960 continue;
961
962 /* iterate over the WMM queues of the BSS */
963 hqp = &priv_tmp->wmm.highest_queued_prio;
964 for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
965
966 spin_lock_irqsave(&priv_tmp->wmm.
967 ra_list_spinlock, flags_ra);
968
969 tid_ptr = &(priv_tmp)->wmm.
970 tid_tbl_ptr[tos_to_tid[i]];
971
972 /* iterate over receiver addresses */
973 list_for_each_entry(ptr, &tid_ptr->ra_list,
974 list) {
975
976 if (!skb_queue_empty(&ptr->skb_head))
977 /* holds both locks */
978 goto found;
979 }
980
981 spin_unlock_irqrestore(&priv_tmp->wmm.
982 ra_list_spinlock,
983 flags_ra);
984 }
985 }
986
987 }
988
989 return NULL;
990
991 found:
992 /* holds ra_list_spinlock */
993 if (atomic_read(hqp) > i)
994 atomic_set(hqp, i);
995 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
996
997 *priv = priv_tmp;
998 *tid = tos_to_tid[i];
999
1000 return ptr;
1001 }
1002
1003 /* This function rotates the RA and BSS lists so packets are picked round robin.
1004 *
1005 * After a packet is successfully transmitted, rotate the RA list so that the RA
1006 * next to the one just served comes first in the list. This way the RAs are
1007 * picked in a round-robin fashion. The same applies to BSS nodes of equal
1008 * priority.
1009 *
1010 * The function also increments the wmm.packets_out counter.
1011 */
1012 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
1013 struct mwifiex_ra_list_tbl *ra,
1014 int tid)
1015 {
1016 struct mwifiex_adapter *adapter = priv->adapter;
1017 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
1018 struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
1019 unsigned long flags;
1020
1021 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
1022 /*
1023 * dirty trick: we remove 'head' temporarily and reinsert it after the
1024 * current bss node, i.e. the list stays fixed while the head is moved
1025 */
1026 list_move(&tbl[priv->bss_priority].bss_prio_head,
1027 &tbl[priv->bss_priority].bss_prio_cur->list);
1028 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
1029
1030 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
1031 if (mwifiex_is_ralist_valid(priv, ra, tid)) {
1032 priv->wmm.packets_out[tid]++;
1033 /* same as above */
1034 list_move(&tid_ptr->ra_list, &ra->list);
1035 }
1036 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1037 }
1038
1039 /*
1040 * This function checks if 11n (A-MSDU) aggregation is possible for the given RA list.
1041 */
1042 static int
1043 mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
1044 struct mwifiex_ra_list_tbl *ptr,
1045 int max_buf_size)
1046 {
1047 int count = 0, total_size = 0;
1048 struct sk_buff *skb, *tmp;
1049 int max_amsdu_size;
1050
1051 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
1052 ptr->is_11n_enabled)
1053 max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
1054 else
1055 max_amsdu_size = max_buf_size;
1056
1057 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
1058 total_size += skb->len;
1059 if (total_size >= max_amsdu_size)
1060 break;
1061 if (++count >= MIN_NUM_AMSDU)
1062 return true;
1063 }
1064
1065 return false;
1066 }
1067
1068 /*
1069 * This function sends a single packet to firmware for transmission.
1070 */
1071 static void
1072 mwifiex_send_single_packet(struct mwifiex_private *priv,
1073 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1074 unsigned long ra_list_flags)
1075 __releases(&priv->wmm.ra_list_spinlock)
1076 {
1077 struct sk_buff *skb, *skb_next;
1078 struct mwifiex_tx_param tx_param;
1079 struct mwifiex_adapter *adapter = priv->adapter;
1080 struct mwifiex_txinfo *tx_info;
1081
1082 if (skb_queue_empty(&ptr->skb_head)) {
1083 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1084 ra_list_flags);
1085 dev_dbg(adapter->dev, "data: nothing to send\n");
1086 return;
1087 }
1088
1089 skb = skb_dequeue(&ptr->skb_head);
1090
1091 tx_info = MWIFIEX_SKB_TXCB(skb);
1092 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
1093
1094 ptr->total_pkt_count--;
1095
1096 if (!skb_queue_empty(&ptr->skb_head))
1097 skb_next = skb_peek(&ptr->skb_head);
1098 else
1099 skb_next = NULL;
1100
1101 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1102
1103 tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1104 sizeof(struct txpd) : 0);
1105
1106 if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1107 /* Requeue the packet on the RA list and try again later */
1108 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1109
1110 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1111 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1112 ra_list_flags);
1113 mwifiex_write_data_complete(adapter, skb, 0, -1);
1114 return;
1115 }
1116
1117 skb_queue_tail(&ptr->skb_head, skb);
1118
1119 ptr->total_pkt_count++;
1120 ptr->ba_pkt_count++;
1121 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1122 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1123 ra_list_flags);
1124 } else {
1125 mwifiex_rotate_priolists(priv, ptr, ptr_index);
1126 atomic_dec(&priv->wmm.tx_pkts_queued);
1127 }
1128 }
1129
1130 /*
1131 * This function checks if the first packet in the given RA list
1132 * is already processed or not.
1133 */
1134 static int
1135 mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1136 struct mwifiex_ra_list_tbl *ptr)
1137 {
1138 struct sk_buff *skb;
1139 struct mwifiex_txinfo *tx_info;
1140
1141 if (skb_queue_empty(&ptr->skb_head))
1142 return false;
1143
1144 skb = skb_peek(&ptr->skb_head);
1145
1146 tx_info = MWIFIEX_SKB_TXCB(skb);
1147 if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1148 return true;
1149
1150 return false;
1151 }
1152
1153 /*
1154 * This function sends a single processed packet to firmware for
1155 * transmission.
1156 */
1157 static void
1158 mwifiex_send_processed_packet(struct mwifiex_private *priv,
1159 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1160 unsigned long ra_list_flags)
1161 __releases(&priv->wmm.ra_list_spinlock)
1162 {
1163 struct mwifiex_tx_param tx_param;
1164 struct mwifiex_adapter *adapter = priv->adapter;
1165 int ret = -1;
1166 struct sk_buff *skb, *skb_next;
1167 struct mwifiex_txinfo *tx_info;
1168
1169 if (skb_queue_empty(&ptr->skb_head)) {
1170 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1171 ra_list_flags);
1172 return;
1173 }
1174
1175 skb = skb_dequeue(&ptr->skb_head);
1176
1177 if (adapter->data_sent || adapter->tx_lock_flag) {
1178 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1179 ra_list_flags);
1180 skb_queue_tail(&adapter->tx_data_q, skb);
1181 atomic_inc(&adapter->tx_queued);
1182 return;
1183 }
1184
1185 if (!skb_queue_empty(&ptr->skb_head))
1186 skb_next = skb_peek(&ptr->skb_head);
1187 else
1188 skb_next = NULL;
1189
1190 tx_info = MWIFIEX_SKB_TXCB(skb);
1191
1192 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1193
1194 if (adapter->iface_type == MWIFIEX_USB) {
1195 adapter->data_sent = true;
1196 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
1197 skb, NULL);
1198 } else {
1199 tx_param.next_pkt_len =
1200 ((skb_next) ? skb_next->len +
1201 sizeof(struct txpd) : 0);
1202 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1203 skb, &tx_param);
1204 }
1205
1206 switch (ret) {
1207 case -EBUSY:
1208 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
1209 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1210
1211 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1212 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1213 ra_list_flags);
1214 mwifiex_write_data_complete(adapter, skb, 0, -1);
1215 return;
1216 }
1217
1218 skb_queue_tail(&ptr->skb_head, skb);
1219
1220 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1221 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1222 ra_list_flags);
1223 break;
1224 case -1:
1225 if (adapter->iface_type != MWIFIEX_PCIE)
1226 adapter->data_sent = false;
1227 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
1228 adapter->dbg.num_tx_host_to_card_failure++;
1229 mwifiex_write_data_complete(adapter, skb, 0, ret);
1230 break;
1231 case -EINPROGRESS:
1232 if (adapter->iface_type != MWIFIEX_PCIE)
1233 adapter->data_sent = false;
1234 break;
1235 case 0:
1236 mwifiex_write_data_complete(adapter, skb, 0, ret);
1237 default:
1238 break;
1239 }
1240 if (ret != -EBUSY) {
1241 mwifiex_rotate_priolists(priv, ptr, ptr_index);
1242 atomic_dec(&priv->wmm.tx_pkts_queued);
1243 }
1244 }
1245
1246 /*
1247 * This function dequeues a packet from the highest priority list
1248 * and transmits it.
1249 */
1250 static int
1251 mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1252 {
1253 struct mwifiex_ra_list_tbl *ptr;
1254 struct mwifiex_private *priv = NULL;
1255 int ptr_index = 0;
1256 u8 ra[ETH_ALEN];
1257 int tid_del = 0, tid = 0;
1258 unsigned long flags;
1259
1260 ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1261 if (!ptr)
1262 return -1;
1263
1264 tid = mwifiex_get_tid(ptr);
1265
1266 dev_dbg(adapter->dev, "data: tid=%d\n", tid);
1267
1268 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
1269 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1270 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1271 return -1;
1272 }
1273
1274 if (mwifiex_is_ptr_processed(priv, ptr)) {
1275 mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
1276 /* ra_list_spinlock has been freed in
1277 mwifiex_send_processed_packet() */
1278 return 0;
1279 }
1280
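/* If 11n is disabled, a BA session is set up or in progress, or a WPS session
 * is active, send a single packet (or an A-MSDU within the existing AMPDU) */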
1281 if (!ptr->is_11n_enabled ||
1282 ptr->ba_status ||
1283 priv->wps.session_enable) {
1284 if (ptr->is_11n_enabled &&
1285 ptr->ba_status &&
1286 ptr->amsdu_in_ampdu &&
1287 mwifiex_is_amsdu_allowed(priv, tid) &&
1288 mwifiex_is_11n_aggragation_possible(priv, ptr,
1289 adapter->tx_buf_size))
1290 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1291 /* ra_list_spinlock has been freed in
1292 * mwifiex_11n_aggregate_pkt()
1293 */
1294 else
1295 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1296 /* ra_list_spinlock has been freed in
1297 * mwifiex_send_single_packet()
1298 */
1299 } else {
1300 if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
1301 ptr->ba_pkt_count > ptr->ba_packet_thr) {
1302 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1303 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1304 BA_SETUP_INPROGRESS);
1305 mwifiex_send_addba(priv, tid, ptr->ra);
1306 } else if (mwifiex_find_stream_to_delete
1307 (priv, tid, &tid_del, ra)) {
1308 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1309 BA_SETUP_INPROGRESS);
1310 mwifiex_send_delba(priv, tid_del, ra, 1);
1311 }
1312 }
1313 if (mwifiex_is_amsdu_allowed(priv, tid) &&
1314 mwifiex_is_11n_aggragation_possible(priv, ptr,
1315 adapter->tx_buf_size))
1316 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1317 /* ra_list_spinlock has been freed in
1318 mwifiex_11n_aggregate_pkt() */
1319 else
1320 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
1321 /* ra_list_spinlock has been freed in
1322 mwifiex_send_single_packet() */
1323 }
1324 return 0;
1325 }
1326
1327 /*
1328 * This function transmits the highest priority packet awaiting in the
1329 * WMM Queues.
1330 */
1331 void
1332 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1333 {
1334 do {
1335 if (mwifiex_dequeue_tx_packet(adapter))
1336 break;
1337 if (adapter->iface_type != MWIFIEX_SDIO) {
1338 if (adapter->data_sent ||
1339 adapter->tx_lock_flag)
1340 break;
1341 } else {
1342 if (atomic_read(&adapter->tx_queued) >=
1343 MWIFIEX_MAX_PKTS_TXQ)
1344 break;
1345 }
1346 } while (!mwifiex_wmm_lists_empty(adapter));
1347 }