/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}
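
	/*
	 * Resulting skb layout (head_size is 4 or 12 and tail_size 0 or 8,
	 * depending on hardware crypto support):
	 *
	 *   | headroom | descriptor + frame (frame_size) | tailroom |
	 */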

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we have the requested amount of head- and tailroom
	 * available around the frame.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;
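
	/*
	 * Move the frame into the headroom by 'align' bytes so that
	 * skb->data ends up on a 4-byte boundary (ALIGN_SIZE() yields
	 * the 0-3 bytes of misalignment).
	 */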
	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
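
	/*
	 * Target layout: l2pad adds 0-3 bytes between header and payload
	 * so that both can start on a 4-byte boundary:
	 *
	 *   | header (header_length) | l2pad | payload |
	 */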

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
			     L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock(&intf->seqlock);
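
	/*
	 * The sequence number occupies bits 4-15 of the sequence control
	 * field, so adding 0x10 advances it by one, while the mask below
	 * preserves only the fragment number in bits 0-3.
	 */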
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock(&intf->seqlock);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
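	/* Bit 2 (0x04) of the SERVICE field is the 802.11b locked-clock bit. */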
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension bit:
			 * at 11 Mbps a rounded-up duration can be ambiguous
			 * by one octet.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (tx_info->control.sta)
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
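		/*
		 * For legacy rates, bit 3 of the MCS value is (by rt2800
		 * convention) the short preamble indication.
		 */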
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which in some way is related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
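
/*
 * rt2x00queue_write_tx_frame - queue a single frame for transmission.
 *
 * The first free entry is claimed, a TX descriptor is built from the
 * mac80211 control information, IV/EIV data is stripped or copied as
 * hardware crypto requires, the frame is aligned or L2 padded, and the
 * descriptor is written before the queue is (possibly) kicked.
 */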
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	spin_lock(&queue->tx_lock);

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}
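
/*
 * A minimal (hypothetical) usage sketch for the iterator below: abort
 * at the first entry whose frame is still owned by the device.
 *
 *	static bool rt2x00queue_entry_pending(struct queue_entry *entry,
 *					      void *data)
 *	{
 *		return test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, rt2x00queue_entry_pending);
 */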
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;
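
	/*
	 * Q_INDEX tracks frames handed to the device and Q_INDEX_DONE
	 * frames completed by it; their difference is the current queue
	 * depth (->length), while ->count accumulates the total number
	 * of completed frames.
	 */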
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
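		/* fall through */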
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
	    (queue->qid == QID_AC_VO) ||
	    (queue->qid == QID_AC_VI) ||
	    (queue->qid == QID_AC_BE) ||
	    (queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
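
/*
 * The single allocation above holds all queue_entry structures followed
 * by the per-entry driver private data:
 *
 *   | entry 0 | ... | entry N-1 | priv 0 | ... | priv N-1 |
 *
 * QUEUE_ENTRY_PRIV_OFFSET() computes the address of entry __index's
 * private area inside that block.
 */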
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}
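
	/*
	 * Layout of the allocated array:
	 *
	 *   queue[0]                  RX
	 *   queue[1..tx_queues]       TX
	 *   queue[1 + tx_queues]      beacon
	 *   queue[2 + tx_queues]      ATIM (only when required)
	 */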

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}