drivers/net/wireless/rsi/rsi_91x_core.c
/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"

/**
 * rsi_determine_min_weight_queue() - This function determines the queue with
 *				       the min weight.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number.
 */
static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	u32 q_len = 0;
	u8 ii = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if ((tx_qinfo[ii].pkt_contended) && q_len) {
			common->min_weight = tx_qinfo[ii].weight;
			break;
		}
	}
	return ii;
}
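
/*
 * Note on the helper above (a reading of the code, not separately
 * documented): when no EDCA queue is both contended and backlogged, the
 * loop falls through and ii ends up equal to NUM_EDCA_QUEUES, so callers
 * have to treat that value as "no eligible queue" (see the
 * q_num < NUM_EDCA_QUEUES check in rsi_core_determine_hal_queue()).
 * common->min_weight is only refreshed when such a queue is found.
 */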

/**
 * rsi_recalculate_weights() - This function recalculates the weights
 *			       corresponding to each queue.
 * @common: Pointer to the driver private structure.
 *
 * Return: recontend_queue bool variable
 */
static bool rsi_recalculate_weights(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	bool recontend_queue = false;
	u8 ii = 0;
	u32 q_len = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		/* Check for the need of contention */
		if (q_len) {
			if (tx_qinfo[ii].pkt_contended) {
				tx_qinfo[ii].weight =
				((tx_qinfo[ii].weight > common->min_weight) ?
				 tx_qinfo[ii].weight - common->min_weight : 0);
			} else {
				tx_qinfo[ii].pkt_contended = 1;
				tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
				recontend_queue = true;
			}
		} else { /* No packets so no contention */
			tx_qinfo[ii].weight = 0;
			tx_qinfo[ii].pkt_contended = 0;
		}
	}

	return recontend_queue;
}
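
/*
 * Rough sketch of one recalculation pass (the numbers are illustrative
 * only, assuming common->min_weight was previously set to 8): a contended,
 * backlogged queue with weight 31 drops to 23, one with weight 8 or less
 * drops to 0, an idle queue is reset to weight 0 with pkt_contended
 * cleared, and a backlogged queue that was not yet contending is re-armed
 * with its full wme_params weight, which makes the function return true so
 * the caller re-runs queue selection.
 */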

/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *				packets to be dequeued based on the number
 *				of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: the queue from which pkts have to be dequeued
 *
 * Return: pkt_num: Number of pkts to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	struct ieee80211_rate rate;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	do {
		txop -= ieee80211_generic_frame_duration(adapter->hw,
							 adapter->vifs[0],
							 common->band,
							 skb->len, &rate);
		pkt_cnt += 1;
		/* Checking if pkts are still there */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;

	} while (txop > 0);

	return pkt_cnt;
}
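
/*
 * Note on the txop arithmetic above: the WMM TXOP limit is specified in
 * units of 32 us, which is presumably why txop * 32 is used to obtain a
 * budget in microseconds; the VI queue budget is additionally rescaled by
 * 32/80 via (txop << 5) / 80. ieee80211_generic_frame_duration() supplies
 * a per-frame duration (in microseconds) that the loop treats as the
 * airtime cost of each packet and subtracts until the budget runs out.
 * As a purely illustrative example, a TXOP limit of 94 gives roughly a
 * 3008 us budget, so frames costing about 300 us each would let roughly
 * ten packets be dequeued in one burst.
 */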

/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *				    which packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

	if (common->hw_data_qs_blocked)
		return q_num;

	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Selecting the queue with least back off */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		    q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Adjust the back off values for all queues again */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queues are freshly contended and the selected queue
		 * doesn't have any packets, then get the queue number again
		 * with fresh values.
		 */
		if (recontend_queue)
			goto get_queue_num;

		q_num = INVALID_QUEUE;
		return q_num;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

	if (q_num == VO_Q || q_num == VI_Q) {
		common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
		common->pkt_cnt -= 1;
	}

	return q_num;
}
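
/*
 * Summary of the selection logic above (a reading of the code, not a
 * separate specification): pending management frames always win, blocked
 * data queues short-circuit to INVALID_QUEUE, and a burst computed earlier
 * (common->pkt_cnt) keeps returning the same selected_qnum until it is
 * exhausted. Otherwise the contended, backlogged queue with the smallest
 * remaining weight is chosen, its pkt_contended flag is cleared and the
 * weights are recalculated; if that recalculation arms a fresh queue while
 * the chosen queue turns out to be empty, selection restarts at the
 * get_queue_num label.
 */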

/**
 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
 *			  specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
static void rsi_core_queue_pkt(struct rsi_common *common,
			       struct sk_buff *skb)
{
	u8 q_num = skb->priority;

	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		dev_kfree_skb(skb);
		return;
	}

	skb_queue_tail(&common->tx_queue[q_num], skb);
}

/**
 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
 *			    specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number.
 *
 * Return: Pointer to sk_buff structure.
 */
static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
					    u8 q_num)
{
	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		return NULL;
	}

	return skb_dequeue(&common->tx_queue[q_num]);
}

/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *			      based on the backoff procedure. Data packets are
 *			      dequeued from the selected hal queue and sent to
 *			      the below layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}

		mutex_lock(&common->tx_rxlock);

		status = adapter->check_hw_queue_status(adapter, q_num);
		if (status <= 0) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		     MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		if (q_num == MGMT_SOFT_Q)
			status = rsi_send_mgmt_pkt(common, skb);
		else
			status = rsi_send_data_pkt(common, skb);

		if (status) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_rxlock);

		if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
			schedule();
	}
}
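
/*
 * Note on the loop above: the jiffies comparison amounts to a roughly
 * 300 ms budget (300 * HZ / 1000 jiffies) after which schedule() is called
 * so the TX thread yields the CPU instead of monopolizing it, and the
 * watermark check wakes the corresponding mac80211 queue again once the
 * software queue has drained to MIN_DATA_QUEUE_WATER_MARK or below.
 */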

/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *tmp_hdr = NULL;
	u8 q_num, tid = 0;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];

	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
	    (ieee80211_is_ctl(tmp_hdr->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}
		q_num = skb->priority;
		tx_params->tid = tid;
		tx_params->sta_id = 0;
	}

	if ((q_num != MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}