rsi: Changed the SDIO interrupt variables and some clean up.
drivers/net/wireless/rsi/rsi_91x_core.c
/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"

/**
 * rsi_determine_min_weight_queue() - This function determines the queue with
 *				      the min weight.
 * @common: Pointer to the driver private structure.
 *
 * Return: Number of the first contended queue that holds packets; its weight
 *	   is stored in common->min_weight. Returns NUM_EDCA_QUEUES if no such
 *	   queue exists.
 */
static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	u32 q_len = 0;
	u8 ii = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if ((tx_qinfo[ii].pkt_contended) && q_len) {
			common->min_weight = tx_qinfo[ii].weight;
			break;
		}
	}
	return ii;
}

/**
 * rsi_recalculate_weights() - This function recalculates the weights
 *			       corresponding to each queue.
 * @common: Pointer to the driver private structure.
 *
 * Return: true if any queue was freshly marked for contention, false
 *	   otherwise.
 */
static bool rsi_recalculate_weights(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	bool recontend_queue = false;
	u8 ii = 0;
	u32 q_len = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		/* Check for the need of contention */
		if (q_len) {
			if (tx_qinfo[ii].pkt_contended) {
				tx_qinfo[ii].weight =
				((tx_qinfo[ii].weight > common->min_weight) ?
				 tx_qinfo[ii].weight - common->min_weight : 0);
			} else {
				tx_qinfo[ii].pkt_contended = 1;
				tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
				recontend_queue = true;
			}
		} else { /* No packets so no contention */
			tx_qinfo[ii].weight = 0;
			tx_qinfo[ii].pkt_contended = 0;
		}
	}

	return recontend_queue;
}
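
/*
 * Illustrative example of the software back-off above (the numbers are made
 * up): if the BE and VI queues both hold packets with weights 30 and 15, the
 * VI queue wins the contention and common->min_weight becomes 15.  On the
 * next recalculation the still-contended BE queue drops to 30 - 15 = 15,
 * while the queue that was just served re-contends with its full wme_params
 * weight, so lower-priority queues steadily catch up instead of starving.
 */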

/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *				    which a packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success, or INVALID_QUEUE if
 *	   no queue has anything to send.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

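	/*
	 * pkt_cnt tracks how many packets are still allowed in the current
	 * burst; while it is non-zero, keep draining the previously selected
	 * queue instead of re-running the weight calculation below.
	 */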
	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Selecting the queue with least back off */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		    q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Adjust the back off values for all queues again */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queue is freshly contended and the selected queue
		 * doesn't have any packets, then get the queue number again
		 * with fresh values.
		 */
		if (recontend_queue)
			goto get_queue_num;

		q_num = INVALID_QUEUE;
		return q_num;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

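	/*
	 * Voice and video traffic may be sent as a short burst: pkt_cnt is
	 * primed here so that up to MAX_CONTINUOUS_VO_PKTS or
	 * MAX_CONTINUOUS_VI_PKTS frames are dequeued before the weights are
	 * re-evaluated.
	 */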
	switch (common->selected_qnum) {
	case VO_Q:
		if (q_len > MAX_CONTINUOUS_VO_PKTS)
			common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
		else
			common->pkt_cnt = --q_len;
		break;

	case VI_Q:
		if (q_len > MAX_CONTINUOUS_VI_PKTS)
			common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
		else
			common->pkt_cnt = --q_len;
		break;

	default:
		common->pkt_cnt = 0;
		break;
	}

	return q_num;
}

/**
 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
 *			  specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
static void rsi_core_queue_pkt(struct rsi_common *common,
			       struct sk_buff *skb)
{
	u8 q_num = skb->priority;

	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		dev_kfree_skb(skb);
		return;
	}

	skb_queue_tail(&common->tx_queue[q_num], skb);
}

/**
 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
 *			    specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number.
 *
 * Return: Pointer to sk_buff structure.
 */
static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
					    u8 q_num)
{
	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		return NULL;
	}

	return skb_dequeue(&common->tx_queue[q_num]);
}

/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *			      based on the backoff procedure. Data packets are
 *			      dequeued from the selected hal queue and sent to
 *			      the lower layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}

		mutex_lock(&common->tx_rxlock);

		status = adapter->check_hw_queue_status(adapter, q_num);
		if (status <= 0) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

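		/*
		 * If this data soft queue has drained to its low water mark,
		 * let mac80211 resume feeding frames for that access category.
		 */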
		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		     MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		if (q_num == MGMT_SOFT_Q)
			status = rsi_send_mgmt_pkt(common, skb);
		else
			status = rsi_send_data_pkt(common, skb);

		if (status) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_rxlock);

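		/*
		 * Yield the CPU once this loop has been transmitting for
		 * roughly 300 ms without a break.
		 */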
		if (time_after(tstamp_2, tstamp_1 + (300 * HZ / 1000)))
			schedule();
	}
}

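/*
 * For context, rsi_core_qos_processor() is intended to be driven from the
 * driver's TX scheduler thread whenever rsi_set_event() is called on
 * common->tx_thread.event.  A minimal sketch of such a loop is shown below;
 * it is illustrative only, and the helper names (rsi_wait_event(),
 * rsi_reset_event(), EVENT_WAIT_FOREVER, tx_thread.thread_done) are assumed
 * from the rest of the driver rather than taken from this file:
 *
 *	do {
 *		rsi_wait_event(&common->tx_thread.event, EVENT_WAIT_FOREVER);
 *		rsi_reset_event(&common->tx_thread.event);
 *		rsi_core_qos_processor(common);
 *	} while (!atomic_read(&common->tx_thread.thread_done));
 */
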
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *tmp_hdr = NULL;
	u8 q_num, tid = 0;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];

	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
	    (ieee80211_is_ctl(tmp_hdr->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
			/* Byte 24 is the QoS control field of a 3-address
			 * QoS data frame; its low bits carry the TID.
			 */
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}
		q_num = skb->priority;
		tx_params->tid = tid;
		tx_params->sta_id = 0;
	}

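	/*
	 * Flow control towards mac80211: if this soft queue is about to reach
	 * its high water mark, stop the corresponding AC queue and wake the
	 * TX thread so the backlog can drain before more frames are accepted.
	 */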
	if ((q_num != MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}