rt2x00: Cleanup struct skb_frame_desc.
drivers/net/wireless/rt2x00/rt2x00queue.c
/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;
	u16 frame_control;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/* Data length should be extended with 4 bytes for CRC */
	data_length = entry->skb->len + 4;

	/*
	 * Read required fields from ieee80211 header.
	 */
	frame_control = le16_to_cpu(hdr->frame_control);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (is_rts_frame(frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.retry_limit;
	if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_get_morefrag(hdr)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (txdesc->queue == QID_BEACON || is_probe_resp(frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		txdesc->ifs = IFS_SIFS;
	} else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else {
		txdesc->ifs = IFS_SIFS;
	}

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
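	/*
	 * Note: the initial SERVICE value of 0x04 is believed to be the
	 * locked-clocks bit of the 802.11b SERVICE field; the length
	 * extension bit (0x80) may be OR'ed in below for CCK rates.
	 */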
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		__set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

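		/*
		 * For OFDM rates the descriptor carries the frame length in
		 * bytes, split into a 6-bit high and a 6-bit low part.
		 */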
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = get_duration_res(data_length, hwrate->bitrate);
		duration = get_duration(data_length, hwrate->bitrate);

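		/*
		 * get_duration() is assumed to round the airtime down; a
		 * non-zero remainder from get_duration_res() means the
		 * duration must be rounded up and, for 11Mbps CCK, may
		 * require the 802.11b length extension bit to disambiguate
		 * the rounded value.
		 */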
		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (rt2x00_get_rate_preamble(rate->hw_value))
			txdesc->signal |= 0x08;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);

void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed; it is now
	 * ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue; there are however a few rules:
	 * 1) Don't kick the beacon queue.
	 * 2) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS and CTS-to-self frames.
	 * 3) Rule 2 can be broken when the number of available entries
	 *    in the queue drops below a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;

	if (unlikely(rt2x00queue_full(queue)))
		return -EINVAL;

	if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		return -EIO;
	}

	__set_bit(ENTRY_DATA_PENDING, &entry->flags);

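	/*
	 * The queue index is advanced before the descriptor is written,
	 * presumably because rt2x00queue_write_tx_descriptor() may kick
	 * the hardware and the queue bookkeeping should already account
	 * for this frame by then.
	 */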
	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

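	/*
	 * TX queues are indexed directly by their qid; the beacon queue
	 * lives at bcn[0] and, when the driver requires one, the ATIM
	 * queue at bcn[1].
	 */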
	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

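	/*
	 * Q_INDEX is advanced when a new frame is queued and Q_INDEX_DONE
	 * when the device has finished with one, so 'length' tracks the
	 * number of frames currently in the queue and 'count' the total
	 * number completed.
	 */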
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	unsigned int i;

	rt2x00queue_reset(queue);

	if (!rt2x00dev->ops->lib->init_rxentry)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
						  &queue->entries[i]);
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	txall_queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		if (!rt2x00dev->ops->lib->init_txentry)
			continue;

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->init_txentry(rt2x00dev,
							  &queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
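	/*
	 * The kick threshold is set at roughly 10% of the queue size;
	 * see rt2x00queue_write_tx_descriptor() for how it is used.
	 */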
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

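/*
 * The queue entries and their driver private data live in one allocation:
 * the array of queue_entry structures is followed by the per-entry private
 * data areas, and this macro computes the address of entry __index's
 * private area within that block.
 */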
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	  ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
		return 0;

	status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
					   rt2x00dev->ops->atim);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}