rt2x00: Remove input_polldev requirements for rfkill
drivers/net/wireless/rt2x00/rt2x00queue.c
/*
        Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

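/*
 * Allocate an RX skb sized for both the frame data and the hardware
 * descriptor, and map it for DMA when the driver requires it.
 */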
struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
                                        struct queue_entry *entry)
{
        unsigned int frame_size;
        unsigned int reserved_size;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;

        /*
         * The frame size includes descriptor size, because the
         * hardware directly receives the frame into the skbuffer.
         */
        frame_size = entry->queue->data_size + entry->queue->desc_size;

        /*
         * Reserve a few bytes extra headroom to allow drivers some moving
         * space (e.g. for alignment), while keeping the skb aligned.
         */
        reserved_size = 8;

        /*
         * Allocate skbuffer.
         */
        skb = dev_alloc_skb(frame_size + reserved_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, reserved_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
                skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                  skb->data,
                                                  skb->len,
                                                  DMA_FROM_DEVICE);
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}

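/*
 * Map a TX skb for streaming DMA towards the device and flag its
 * frame descriptor accordingly.
 */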
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
                                          DMA_TO_DEVICE);
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

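/*
 * Undo any RX or TX DMA mapping set up for this skb, based on the
 * flags in its frame descriptor.
 */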
void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        }

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}

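/*
 * Unmap and free an skb that was allocated for a queue entry.
 */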
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
        rt2x00queue_unmap_skb(rt2x00dev, skb);
        dev_kfree_skb_any(skb);
}

void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                      struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_rate *rate =
            ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
        const struct rt2x00_rate *hwrate;
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Initialize information from queue
         */
        txdesc->queue = entry->queue->qid;
        txdesc->cw_min = entry->queue->cw_min;
        txdesc->cw_max = entry->queue->cw_max;
        txdesc->aifs = entry->queue->aifs;

        /* Data length should be extended with 4 bytes for CRC */
        data_length = entry->skb->len + 4;

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.retry_limit;
        if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Beacons and probe responses require the TSF timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
                txdesc->ifs = IFS_SIFS;
        } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
                txdesc->ifs = IFS_BACKOFF;
        } else {
                txdesc->ifs = IFS_SIFS;
        }

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        hwrate = rt2x00_get_rate(rate->hw_value);
        txdesc->signal = hwrate->plcp;
        txdesc->service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

                txdesc->length_high = (data_length >> 6) & 0x3f;
                txdesc->length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = get_duration_res(data_length, hwrate->bitrate);
                duration = get_duration(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->service |= 0x80;
                }

                txdesc->length_high = (duration >> 8) & 0xff;
                txdesc->length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (rt2x00_get_rate_preamble(rate->hw_value))
                        txdesc->signal |= 0x08;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);

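/*
 * Hand the completed TX descriptor to the driver, dump the frame to
 * debugfs, and kick the TX queue unless this is a beacon or an
 * intermediate frame in a burst.
 */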
void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

        rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

        /*
         * All processing on the frame has been completed; this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick the beacon queue.
         * 2) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame that is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 3) Rule 2 can be broken when the available entries
         *    in the queue are less than a certain threshold.
         */
        if (entry->queue->qid == QID_BEACON)
                return;

        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);

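/*
 * Write a frame to the next free entry of a TX queue: build the TX
 * descriptor, claim the entry, push the data to the driver, and
 * finally write the descriptor, which may kick the queue.
 */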
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;

        if (unlikely(rt2x00queue_full(queue)))
                return -EINVAL;

        if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        /*
         * skb->cb array is now ours and we are free to use it.
         */
        skbdesc = get_skb_frame_desc(entry->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
                __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                return -EIO;
        }

        if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
                rt2x00queue_map_txskb(queue->rt2x00dev, skb);

        __set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);

        return 0;
}

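/*
 * Resolve a queue identifier to the matching data_queue structure;
 * returns NULL when the qid does not map to an allocated queue.
 */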
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
                                         const enum data_queue_qid queue)
{
        int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
                return &rt2x00dev->tx[queue];

        if (!rt2x00dev->bcn)
                return NULL;

        if (queue == QID_BEACON)
                return &rt2x00dev->bcn[0];
        else if (queue == QID_ATIM && atim)
                return &rt2x00dev->bcn[1];

        return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

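/*
 * Look up the queue entry currently referenced by the given index type
 * (Q_INDEX, Q_INDEX_DONE, ...) under the queue lock.
 */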
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

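/*
 * Advance one of the queue indexes (with wrap-around) and update the
 * queue length and completion counters accordingly.
 */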
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->count = 0;
        queue->length = 0;
        memset(queue->index, 0, sizeof(queue->index));

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue = rt2x00dev->rx;
        unsigned int i;

        rt2x00queue_reset(queue);

        if (!rt2x00dev->ops->lib->init_rxentry)
                return;

        for (i = 0; i < queue->limit; i++)
                rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
                                                  &queue->entries[i]);
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        txall_queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                if (!rt2x00dev->ops->lib->init_txentry)
                        continue;

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->init_txentry(rt2x00dev,
                                                          &queue->entries[i]);
        }
}

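/*
 * Allocate the queue_entry array for a queue, including the per-entry
 * driver private data, which is stored behind the entry array itself.
 */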
static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        ( ((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)) )

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
                                  struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                if (queue->entries[i].skb)
                        rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
        }
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
                                    struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}

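/*
 * Allocate the entries for all queues (RX, TX, beacon and, when
 * required, ATIM) along with the RX skbs; on failure everything is
 * torn down again through rt2x00queue_uninitialize().
 */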
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
                status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

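/*
 * Release the RX skbs and the entry arrays of all queues.
 */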
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        spin_lock_init(&queue->lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

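/*
 * Allocate and initialize the data_queue structures themselves:
 * one RX queue, ops->tx_queues TX queues, a beacon queue and an
 * optional ATIM queue, laid out in a single array.
 */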
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_BE + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_BE;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}