/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
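/*
 * Resulting buffer layout (descriptive sketch, not from the original
 * source): with hardware crypto supported, the allocation above is
 * 12 bytes of headroom (4 for alignment + 8 for IV/EIV), followed by
 * data_size + desc_size bytes of frame space, followed by 8 bytes of
 * tailroom for the ICV. The skb_reserve()/skb_put() pair leaves
 * skb->data pointing at the frame space with that head and tail free.
 */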
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
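/*
 * Illustration only (assuming ALIGN_SIZE(skb, offset) is the usual
 * distance of skb->data + offset from a 4-byte boundary): if skb->data
 * ends in ...0x02 and header_length is 24, ALIGN_SIZE(skb, 24) is 2,
 * so the functions above shift the whole frame down by those 2 bytes
 * and trim the tail back to the original length.
 */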
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/* Move the header */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
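/*
 * Worked example (illustrative, assuming L2PAD_SIZE(h) == (-(h) & 3)):
 * for a 26-byte QoS header, L2PAD_SIZE(26) == 2, so the payload starts
 * at offset 26 + 2 == 28, which is 4-byte aligned. The function above
 * makes room with skb_push(), moves the header to the new front, and
 * shifts the payload so exactly those l2pad bytes separate the two.
 */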
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup.
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
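/*
 * Illustrative CCK numbers (assuming GET_DURATION/GET_DURATION_RES are
 * the usual quotient/remainder of (len * 8 * 10) / bitrate, with the
 * bitrate in 100kbit/s units): a 212-byte encapsulated frame at
 * 11 Mbps (bitrate == 110) gives 16960 / 110 == 154 with residual 20,
 * so duration becomes 155 us, and because the residual is nonzero and
 * <= 30 the Length Extension bit (0x80) is set in the service field.
 */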
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for a frame that has been injected
	 * through a monitor interface. The latter is needed for testing a
	 * monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 *  1) Don't kick unless this is the last frame in a burst.
	 *     When the burst flag is set, this frame is always followed
	 *     by another frame which is in some way related to it.
	 *     This is true for fragments, RTS or CTS-to-self frames.
	 *  2) Rule 1 can be broken when the available entries
	 *     in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

	return 0;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			fn(&queue->entries[i]);
	} else {
		for (i = index_start; i < queue->limit; i++)
			fn(&queue->entries[i]);

		for (i = 0; i < index_end; i++)
			fn(&queue->entries[i]);
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
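/*
 * Example of the wrap-around case (illustrative): with limit == 8,
 * index[start] == 6 and index[end] == 2, the snapshot above gives
 * index_start == 6 and index_end == 2, so fn() is applied to entries
 * 6, 7, then 0, 1 -- oldest first, preserving transmit order.
 */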
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	queue->last_action[index] = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
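/*
 * Sketch of the counters (illustrative): with limit == 4, advancing
 * an index through 0, 1, 2, 3 wraps it back to 0 on the next call.
 * queue->length tracks entries handed to the device but not yet
 * completed (Q_INDEX increments grow it, Q_INDEX_DONE increments
 * shrink it), while queue->count only ever grows.
 */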
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);
	unsigned int i;

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, we can only guarantee
	 * full support for flushing if the driver is able
	 * to cancel all pending frames (drop = true).
	 */
	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
		queue->rt2x00dev->ops->lib->flush_queue(queue);

	/*
	 * When we don't want to drop any frames, or when
	 * the driver doesn't fully flush the queue correctly,
	 * we must wait for the queue to become empty.
	 */
	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
		msleep(10);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++) {
		queue->index[i] = 0;
		queue->last_action[i] = jiffies;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
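/*
 * Layout produced by the allocation above (descriptive, not from the
 * original source): a single kcalloc block holds the queue_entry array
 * followed by the per-entry driver data, so with limit == 3:
 *
 *   [entry 0][entry 1][entry 2][priv 0][priv 1][priv 2]
 *
 * QUEUE_ENTRY_PRIV_OFFSET finds the start of priv i by skipping
 * limit * sizeof(struct queue_entry) bytes, then i * priv_size bytes.
 */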
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
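/*
 * Resulting layout of the queue array (descriptive sketch): for a
 * device with 4 TX queues and an ATIM queue, the single allocation is
 * indexed as
 *
 *   queue[0]    -> rx
 *   queue[1..4] -> tx (QID_AC_VO .. QID_AC_BK)
 *   queue[5]    -> bcn[0] (QID_BEACON)
 *   queue[6]    -> bcn[1] (QID_ATIM, only when required)
 */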
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}