/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested amount of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
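
/*
 * rt2x00queue_map_txskb - map a TX skb for streaming DMA.
 *
 * The mapping covers the complete skb (including any extra headroom
 * the driver claimed in rt2x00queue_write_tx_data) and is typically
 * torn down again from the TX done path via rt2x00queue_unmap_skb().
 * The SKBDESC_DMA_MAPPED_TX flag records which direction was used,
 * so the unmap helper below can pick the matching DMA direction.
 */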
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len,
			   DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
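
/*
 * An IEEE 802.11 header is not necessarily a multiple of 4 bytes long
 * (e.g. a QoS data header is 26 bytes), so the payload of a frame may
 * end up misaligned inside the skb. ALIGN_SIZE() reports how many
 * bytes the frame must be shifted to restore 4-byte alignment;
 * rt2x00queue_align_frame() below performs that shift in place by
 * temporarily extending the head of the skb.
 */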
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
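
/*
 * L2 padding aligns the payload instead of moving the whole frame:
 * up to 3 pad bytes are inserted between the 802.11 header and the
 * payload so that both start on a 4-byte boundary. Roughly:
 *
 *	before:	| header (hdrlen) | payload ...
 *	after:	| header (hdrlen) | l2pad | payload ...
 *
 * For example, a 26-byte QoS data header needs 2 bytes of padding so
 * the payload starts at offset 28.
 */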
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
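
/*
 * Sequence number handling: mac80211 asks the driver to assign the
 * sequence number when IEEE80211_TX_CTL_ASSIGN_SEQ is set. Most rt2x00
 * hardware can do this itself (ENTRY_TXD_GENERATE_SEQ); only when the
 * device requires software sequence numbers (REQUIRE_SW_SEQNO), or as
 * a workaround when QoS is disabled, is the number taken from the
 * per-interface counter under intf->seqlock.
 */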
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS)
		 * frames. To work around the problem, generate the seqno
		 * in software if QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock(&intf->seqlock);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock(&intf->seqlock);
}
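
/*
 * The PLCP variant of the TX descriptor encodes the legacy (pre-HT)
 * physical layer header. For OFDM rates the LENGTH field carries the
 * PSDU length in bytes, split over length_high/length_low. For CCK
 * rates it instead carries the transmit duration in microseconds:
 * GET_DURATION() converts the length at the given bitrate (expressed
 * in 100 kbps units, so 110 means 11 Mbps) and GET_DURATION_RES()
 * yields the division remainder, which decides rounding and the
 * 11 Mbps length-extension bit in the SERVICE field.
 */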
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (tx_info->control.sta) {
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Left zero on all other settings. */
		return;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
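
/*
 * rt2x00queue_create_tx_descriptor - build the hardware independent TX
 * descriptor. All information mac80211 hands us in skb->cb is distilled
 * into the txentry_desc before the cb area is reused for the rt2x00
 * frame descriptor; the HT and PLCP helpers above fill in the rate
 * specific union members.
 */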
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
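
/*
 * rt2x00queue_write_tx_frame - queue one frame for transmission.
 *
 * The TX path in one place: build the descriptor, claim skb->cb,
 * strip or copy the IV when hardware crypto is used, align the frame
 * (or insert L2 padding) for DMA, then under the tx_lock claim a free
 * entry, hand the data and descriptor to the driver and finally kick
 * the queue.
 */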
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}
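
/*
 * A minimal usage sketch for rt2x00queue_for_each_entry() (hypothetical
 * callback, not part of this file): walk all entries between the done
 * pointer and the current index, stopping early when the callback
 * returns true.
 *
 *	static bool example_fn(struct queue_entry *entry, void *data)
 *	{
 *		return !test_bit(ENTRY_DATA_PENDING, &entry->flags);
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, example_fn);
 */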
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
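
/*
 * Queue state is managed on two levels: start/stop (taken under
 * status_lock) control whether the hardware queue runs at all, while
 * pause/unpause only gate the flow of new frames from mac80211.
 * A queue is started in the paused state and unpaused afterwards,
 * and is paused before it is stopped.
 */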
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}
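
/*
 * Queue entries are allocated as a single block: first the array of
 * queue_entry structures, then one driver private area per entry.
 * QUEUE_ENTRY_PRIV_OFFSET() (defined locally below) computes the
 * address of the private area belonging to a given entry index:
 *
 *	| entry 0 | ... | entry N-1 | priv 0 | ... | priv N-1 |
 */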
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}
static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
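
/*
 * All data queues live in one contiguous array so they can be walked
 * with queue_for_each(). The layout is fixed:
 *
 *	queue[0]			RX
 *	queue[1 .. tx_queues]		TX (QID_AC_VO + index)
 *	queue[1 + tx_queues]		beacon
 *	queue[2 + tx_queues]		ATIM (only when required)
 */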
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}