/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary;
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested number
	 * of bytes available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
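
/*
 * Sizing example (illustrative only; the real data_size and desc_size
 * come from the chipset's data_queue_desc, the values below are
 * assumptions): with data_size = 2432 and desc_size = 32, frame_size is
 * 2464 bytes. On a device with hardware crypto support the allocation
 * becomes 2464 + (4 + 8) bytes head + 8 bytes tail = 2484 bytes, and
 * skb_reserve() leaves 12 bytes of headroom for alignment and IV/EIV
 * insertion.
 */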

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
109
110void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
111{
112 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
113
114 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
115 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
116 DMA_FROM_DEVICE);
117 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
118 }
119
120 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
0b8004aa 121 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
c4da0048
GW
122 DMA_TO_DEVICE);
123 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
124 }
125}
0b8004aa 126EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
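
/*
 * Worked example (added for clarity; ALIGN_SIZE() is assumed to yield
 * the offset of skb->data from a 4-byte boundary): if skb->data sits at
 * an address ending in 0x2, align is 2. skb_push() moves skb->data back
 * 2 bytes onto the aligned address, memmove() copies the frame down
 * into place, and skb_trim() drops the 2 stale trailing bytes to
 * restore the original length.
 */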

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
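
/*
 * Worked example (added for clarity; L2PAD_SIZE(h) is assumed to expand
 * to -h & 3, i.e. the padding that rounds the header up to a 4-byte
 * boundary): a 26-byte QoS data header yields l2pad = 2, so two pad
 * bytes are left between header and payload and the payload starts at
 * offset 28, keeping both the header start and the payload start
 * 4-byte aligned.
 */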

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
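
/*
 * Note (added for clarity): in the 802.11 sequence control field the
 * fragment number occupies bits 0-3 and the sequence number bits 4-15,
 * which is why a new sequence number is obtained by adding 0x10, and why
 * the masking above preserves only the IEEE80211_SCTL_FRAG bits before
 * the sequence number is OR'ed in.
 */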

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
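
/*
 * Worked CCK example (added for clarity; GET_DURATION() and
 * GET_DURATION_RES() are assumed to compute (size * 8 * 10) / bitrate
 * and its remainder, with bitrate in units of 100 kbit/s): for a
 * 1500-byte frame, data_length is 1504 after adding the CRC, and at
 * 11 Mbit/s (bitrate == 110) the airtime is 120320 / 110 = 1093 us with
 * residual 90, so duration is rounded up to 1094 us; since the residual
 * exceeds 30, the Length Extension bit is not needed.
 */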

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for a frame that has
	 * been injected through a monitor interface. The latter is
	 * needed for testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(rt2x00dev, entry->skb);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
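
/*
 * Note (added for clarity; this assumes rt2x00queue_threshold() reports
 * that the number of free entries has dropped below queue->threshold):
 * since rt2x00queue_alloc_entries() below sets the threshold to roughly
 * one tenth of the queue size, a 64-entry queue stops honouring the
 * burst flag and is kicked unconditionally once fewer than 7 free
 * entries remain.
 */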

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(entry, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
		queue->last_index = jiffies;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
		queue->last_index_done = jiffies;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	queue->last_index = jiffies;
	queue->last_index_done = jiffies;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
			if (queue->qid == QID_RX)
				rt2x00queue_index_inc(queue, Q_INDEX);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )
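
	/*
	 * Layout note (added for clarity): the single allocation above
	 * packs all queue_entry structures first and all per-entry driver
	 * private areas after them, so the macro resolves entry i's
	 * private data as base + limit * esize + i * psize:
	 *
	 *   [entry 0 ... entry limit-1][priv 0 ... priv limit-1]
	 */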

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
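
/*
 * Layout example (added for clarity): for a device with 4 TX queues and
 * no ATIM queue, the allocation above produces one contiguous array of
 * 6 data_queue structures:
 *
 *   queue[0]    -> rt2x00dev->rx        (QID_RX)
 *   queue[1..4] -> rt2x00dev->tx[0..3]  (qid = QID_AC_BE + index)
 *   queue[5]    -> rt2x00dev->bcn[0]    (QID_BEACON)
 */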

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}