drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
/*
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00
	Abstract: rt2x00 queue datastructures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 still makes sense since it is big enough to contain the maximum
 * fragment size according to the IEEE 802.11 specs.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432
#define MGMT_FRAME_SIZE		256
#define AGGREGATION_SIZE	3840

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_VO: AC VO queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_VO = 0,
	QID_AC_VI = 1,
	QID_AC_BE = 2,
	QID_AC_BK = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};

/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *	mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	SKBDESC_IV_STRIPPED = 1 << 2,
	SKBDESC_NOT_MAC80211 = 1 << 3,
	SKBDESC_DESC_IN_SKB = 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array; this means that
 * it should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}

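/*
 * Usage sketch (illustrative only, not part of the mainline header): a
 * driver's TX path would typically fetch the descriptor right after mac80211
 * hands over the skb and then fill in the driver-specific fields. The helper
 * name and field values below are made up for illustration.
 *
 *	static void example_init_skbdesc(struct queue_entry *entry)
 *	{
 *		struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 *
 *		skbdesc->flags = 0;
 *		skbdesc->desc_len = 0;
 *		skbdesc->entry = entry;
 *	}
 */
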
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = BIT(0),
	RXDONE_SIGNAL_BITRATE = BIT(1),
	RXDONE_SIGNAL_MCS = BIT(2),
	RXDONE_MY_BSS = BIT(3),
	RXDONE_CRYPTO_IV = BIT(4),
	RXDONE_CRYPTO_ICV = BIT(5),
	RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )

/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};

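/*
 * Usage sketch (illustrative only): RXDONE_SIGNAL_MASK, defined above, can be
 * used in an rxdone handler to find out how the @signal field should be
 * interpreted. The helper name is made up; only the mask, the flag values and
 * the dev_flags field come from this header.
 *
 *	static int example_signal_type(const struct rxdone_entry_desc *rxdesc)
 *	{
 *		return rxdesc->dev_flags & RXDONE_SIGNAL_MASK;
 *	}
 *
 * A result of RXDONE_SIGNAL_PLCP, RXDONE_SIGNAL_BITRATE or RXDONE_SIGNAL_MCS
 * tells the caller whether @signal holds a PLCP value, a bitrate or an MCS
 * index.
 */
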
/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries
 * @TXDONE_FAILURE: Frame was not successfully sent
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
	TXDONE_AMPDU,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};

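/*
 * Usage sketch (illustrative only): a driver's TX status handler could fill
 * a txdone report like this before handing it to the rt2x00 library. The
 * surrounding function is made up; only the flag names and the struct layout
 * come from this header.
 *
 *	static void example_fill_txdone(struct txdone_entry_desc *txdesc,
 *					bool success, int retry)
 *	{
 *		txdesc->flags = 0;
 *		__set_bit(success ? TXDONE_SUCCESS : TXDONE_FAILURE,
 *			  &txdesc->flags);
 *		txdesc->retry = retry;
 *	}
 */
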
/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires a sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
	ENTRY_TXD_HT_MIMO_PS,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: Use Space Time Block Coding (only available for MCS rates < 8).
 * @ba_size: Size of the recipient's RX reorder buffer - 1.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	u16 length;
	u16 header_length;

	union {
		struct {
			u16 length_high;
			u16 length_low;
			u16 signal;
			u16 service;
			enum ifs ifs;
		} plcp;

		struct {
			u16 mcs;
			u8 stbc;
			u8 ba_size;
			u8 mpdu_density;
			enum txop txop;
			int wcid;
		} ht;
	} u;

	enum rate_modulation rate_mode;

	short retry_limit;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};

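/*
 * Usage sketch (illustrative only): for an HT frame the TX descriptor setup
 * path would populate the ht part of the union and set the relevant flags.
 * The function and the concrete values are made up for illustration.
 *
 *	static void example_fill_ht_txdesc(struct txentry_desc *txdesc)
 *	{
 *		txdesc->u.ht.mcs = 7;
 *		txdesc->u.ht.ba_size = 63;
 *		txdesc->u.ht.mpdu_density = 4;
 *		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 *		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
 *	}
 */
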
/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *	while transferring the data to the hardware. No TX status report will
 *	be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
 *	returned. It is now waiting for the status reporting before the
 *	entry can be reused again.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_BCN_ENABLED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_DATA_PENDING,
	ENTRY_DATA_IO_FAILED,
	ENTRY_DATA_STATUS_PENDING,
	ENTRY_DATA_STATUS_SET,
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @last_action: Timestamp of last change.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 * @status: Device specific status
 */
struct queue_entry {
	unsigned long flags;
	unsigned long last_action;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	u32 status;

	void *priv_data;
};

/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 *	transferred to the hardware.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DMA_DONE,
	Q_INDEX_DONE,
	Q_INDEX_MAX,
};

/**
 * enum data_queue_flags: Status flags for data queues
 *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
 *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
 *	be transmitted and beacon queues will start beaconing the configured
 *	beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *	When this bit is set, the queue has been stopped in mac80211,
 *	preventing new frames from being enqueued. However, a few frames
 *	might still appear shortly after the pausing.
 */
enum data_queue_flags {
	QUEUE_STARTED,
	QUEUE_PAUSED,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00_dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
 * @status_lock: The mutex for protecting the start/stop/flush
 *	handling on this queue.
 * @tx_lock: Spinlock to serialize tx operations on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever the @index
 *	array needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;
	unsigned long flags;

	struct mutex status_lock;
	spinlock_t tx_lock;
	spinlock_t index_lock;

	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned char desc_size;
	unsigned char winfo_size;
	unsigned short priv_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};

/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))

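/*
 * Usage sketch (illustrative only): counting the frames currently queued on
 * all regular TX queues. The function is made up; the iteration macro and the
 * &struct data_queue fields come from this header.
 *
 *	static unsigned int example_count_tx_frames(struct rt2x00_dev *rt2x00dev)
 *	{
 *		struct data_queue *queue;
 *		unsigned int count = 0;
 *
 *		tx_queue_for_each(rt2x00dev, queue)
 *			count += queue->length;
 *
 *		return count;
 *	}
 */
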
/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
 * @data: Data to pass to the callback function
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start pointer
 * and will walk through the queue until it reaches the @end pointer.
 *
 * If fn returns true for an entry rt2x00queue_for_each_entry will stop
 * processing and return true as well.
 */
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data));

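/*
 * Usage sketch (illustrative only): walking all entries between Q_INDEX_DONE
 * and Q_INDEX with a callback. The callback and its purpose are made up; the
 * iterator is the one declared above.
 *
 *	static bool example_entry_pending(struct queue_entry *entry, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		if (test_bit(ENTRY_DATA_PENDING, &entry->flags))
 *			(*count)++;
 *		return false;	// false means: keep walking
 *	}
 *
 *	// unsigned int count = 0;
 *	// rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *	//			      &count, example_entry_pending);
 */
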
/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
	return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
	return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	return rt2x00queue_available(queue) < queue->threshold;
}

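/*
 * Usage sketch (illustrative only): the helpers above are typically used to
 * decide whether a TX queue can still accept frames. This fragment is made
 * up and is not the actual rt2x00 flow control code.
 *
 *	if (rt2x00queue_threshold(queue) || rt2x00queue_full(queue)) {
 *		// too few free entries left; stop feeding this queue
 *	}
 */
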
/**
 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
 * @entry: Queue entry to check.
 */
static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
{
	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;
	return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
}

2bb057d0
ID
639 * _rt2x00_desc_read - Read a word from the hardware descriptor.
640 * @desc: Base descriptor address
641 * @word: Word index from where the descriptor should be read.
642 * @value: Address where the descriptor value should be written into.
643 */
644static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
645{
646 *value = desc[word];
647}
648
649/**
650 * rt2x00_desc_read - Read a word from the hardware descriptor, this
651 * function will take care of the byte ordering.
181d6902
ID
652 * @desc: Base descriptor address
653 * @word: Word index from where the descriptor should be read.
654 * @value: Address where the descriptor value should be written into.
655 */
656static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
657{
2bb057d0
ID
658 __le32 tmp;
659 _rt2x00_desc_read(desc, word, &tmp);
660 *value = le32_to_cpu(tmp);
661}
662
663/**
664 * rt2x00_desc_write - write a word to the hardware descriptor, this
665 * function will take care of the byte ordering.
666 * @desc: Base descriptor address
667 * @word: Word index from where the descriptor should be written.
668 * @value: Value that should be written into the descriptor.
669 */
670static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
671{
672 desc[word] = value;
181d6902
ID
673}
674
675/**
2bb057d0 676 * rt2x00_desc_write - write a word to the hardware descriptor.
181d6902
ID
677 * @desc: Base descriptor address
678 * @word: Word index from where the descriptor should be written.
679 * @value: Value that should be written into the descriptor.
680 */
681static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
682{
2bb057d0 683 _rt2x00_desc_write(desc, word, cpu_to_le32(value));
181d6902
ID
684}
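
/*
 * Usage sketch (illustrative only): a read-modify-write of one descriptor
 * word. The word index and the bit being set are made up; only the helpers
 * themselves are defined in this header.
 *
 *	static void example_patch_desc_word(__le32 *desc)
 *	{
 *		u32 word;
 *
 *		rt2x00_desc_read(desc, 0, &word);
 *		word |= BIT(0);
 *		rt2x00_desc_write(desc, 0, word);
 *	}
 */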

#endif /* RT2X00QUEUE_H */