rt2x00: Centralize allocation of RX skbs.
drivers/net/wireless/rt2x00/rt2x00queue.c
/*
        Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

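/**
 * rt2x00queue_alloc_skb - Allocate a RX skb for the given queue
 * @queue: Queue for which the skb is allocated.
 *
 * The buffer is sized to hold both the frame data and the descriptor
 * (queue->data_size + queue->desc_size), since the hardware receives
 * the frame directly into the skbuffer. A few bytes of headroom are
 * reserved so drivers have room to move the data for alignment.
 */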
struct sk_buff *rt2x00queue_alloc_skb(struct data_queue *queue)
{
        struct sk_buff *skb;
        unsigned int frame_size;
        unsigned int reserved_size;

        /*
         * The frame size includes the descriptor size, because the
         * hardware receives the frame directly into the skbuffer.
         */
        frame_size = queue->data_size + queue->desc_size;

        /*
         * Reserve a few bytes extra headroom to allow drivers some moving
         * space (e.g. for alignment), while keeping the skb aligned.
         */
        reserved_size = 8;

        /*
         * Allocate the skbuffer.
         */
        skb = dev_alloc_skb(frame_size + reserved_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, reserved_size);
        skb_put(skb, frame_size);

        return skb;
}
EXPORT_SYMBOL_GPL(rt2x00queue_alloc_skb);

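/**
 * rt2x00queue_free_skb - Free a skb allocated by rt2x00queue_alloc_skb()
 * @skb: The skb to free.
 *
 * Uses dev_kfree_skb_any(), so it is safe to call from any context.
 */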
void rt2x00queue_free_skb(struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00queue_free_skb);

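/**
 * rt2x00queue_create_tx_descriptor - Build a driver-independent TX descriptor
 * @entry: Queue entry holding the frame in entry->skb.
 * @txdesc: TX descriptor to fill in.
 *
 * Derives all generic descriptor fields (queue parameters, ACK/RTS/CTS
 * and fragmentation flags, retry information, IFS priority and PLCP
 * signal/service/length values) from the ieee80211 TX control info and
 * the frame header. Once this has run, the skb->cb area may be reused.
 */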
void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                      struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_rate *rate =
            ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
        const struct rt2x00_rate *hwrate;
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Initialize information from queue.
         */
        txdesc->queue = entry->queue->qid;
        txdesc->cw_min = entry->queue->cw_min;
        txdesc->cw_max = entry->queue->cw_max;
        txdesc->aifs = entry->queue->aifs;

        /* Data length should be extended with 4 bytes for CRC */
        data_length = entry->skb->len + 4;

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame.
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.retry_limit;
        if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending.
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
                txdesc->ifs = IFS_SIFS;
        } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
                txdesc->ifs = IFS_BACKOFF;
        } else {
                txdesc->ifs = IFS_SIFS;
        }

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        hwrate = rt2x00_get_rate(rate->hw_value);
        txdesc->signal = hwrate->plcp;
        txdesc->service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

                txdesc->length_high = (data_length >> 6) & 0x3f;
                txdesc->length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = get_duration_res(data_length, hwrate->bitrate);
                duration = get_duration(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension.
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->service |= 0x80;
                }

                txdesc->length_high = (duration >> 8) & 0xff;
                txdesc->length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (rt2x00_get_rate_preamble(rate->hw_value))
                        txdesc->signal |= 0x08;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);

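/**
 * rt2x00queue_write_tx_descriptor - Hand a TX descriptor to the driver
 * @entry: Queue entry holding the frame.
 * @txdesc: Descriptor built by rt2x00queue_create_tx_descriptor().
 *
 * Writes the descriptor through the driver callback, dumps the now
 * completed frame to debugfs, and kicks the TX queue when the frame is
 * not part of a burst or the queue has reached its threshold. The
 * beacon queue is never kicked from here.
 */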
void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

        rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

        /*
         * All processing on the frame has been completed, which means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick the beacon queue.
         * 2) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 3) Rule 2 can be broken when the available entries
         *    in the queue are less than a certain threshold.
         */
        if (entry->queue->qid == QID_BEACON)
                return;

        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);

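/**
 * rt2x00queue_write_tx_frame - Queue a frame for transmission
 * @queue: Queue the frame should be added to.
 * @skb: The frame to send.
 *
 * Claims the current Q_INDEX entry, builds the TX descriptor, hands the
 * frame data to the driver, and finally writes the descriptor, which may
 * kick the queue. Returns -EINVAL when the queue is full or the entry is
 * unexpectedly still owned by the device, and -EIO when the driver
 * refuses the frame data.
 */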
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;

        if (unlikely(rt2x00queue_full(queue)))
                return -EINVAL;

        if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc;
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
                __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                return -EIO;
        }

        __set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);

        return 0;
}

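/**
 * rt2x00queue_get_queue - Map a queue ID to its data_queue structure
 * @rt2x00dev: Device owning the queues.
 * @queue: Queue ID to look up.
 *
 * TX queue IDs index the TX queue array directly; QID_BEACON and
 * QID_ATIM map to the first and second beacon queue entry, the latter
 * only when the driver requires an ATIM queue. Any other ID returns
 * NULL.
 */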
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
                                         const enum data_queue_qid queue)
{
        int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
                return &rt2x00dev->tx[queue];

        if (!rt2x00dev->bcn)
                return NULL;

        if (queue == QID_BEACON)
                return &rt2x00dev->bcn[0];
        else if (queue == QID_ATIM && atim)
                return &rt2x00dev->bcn[1];

        return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

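/**
 * rt2x00queue_get_entry - Fetch the entry at one of the queue indices
 * @queue: Queue to read from.
 * @index: Index type (Q_INDEX, Q_INDEX_DONE, ...).
 *
 * The index itself is read under the queue lock, but the returned
 * entry is not locked in any way.
 */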
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

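/**
 * rt2x00queue_index_inc - Advance one of the queue indices
 * @queue: Queue to modify.
 * @index: Index type to advance.
 *
 * Wraps the index back to 0 at queue->limit and keeps the bookkeeping
 * consistent: advancing Q_INDEX grows the queue length, advancing
 * Q_INDEX_DONE shrinks it and bumps the completion counter.
 */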
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->lock, irqflags);
}
EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->count = 0;
        queue->length = 0;
        memset(queue->index, 0, sizeof(queue->index));

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue = rt2x00dev->rx;
        unsigned int i;

        rt2x00queue_reset(queue);

        if (!rt2x00dev->ops->lib->init_rxentry)
                return;

        for (i = 0; i < queue->limit; i++)
                rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
                                                  &queue->entries[i]);
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        txall_queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                if (!rt2x00dev->ops->lib->init_txentry)
                        continue;

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->init_txentry(rt2x00dev,
                                                          &queue->entries[i]);
        }
}

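/*
 * All entries and their driver private data are allocated in a single
 * block: an array of queue->limit struct queue_entry elements, followed
 * by queue->limit blocks of qdesc->priv_size bytes, with entry i
 * pointing at private block i (see QUEUE_ENTRY_PRIV_OFFSET below).
 */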
static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        ( ((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)) )

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                if (queue->entries[i].skb)
                        rt2x00queue_free_skb(queue->entries[i].skb);
        }
}

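/*
 * Give every entry in the queue its own skb up front. This is the
 * centralized RX skb allocation: individual drivers presumably no
 * longer need to allocate their own receive buffers.
 */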
static int rt2x00queue_alloc_skbs(struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_skb(queue);
                if (!skb)
                        goto exit;

                queue->entries[i].skb = skb;
        }

        return 0;

exit:
        rt2x00queue_free_skbs(queue);

        return -ENOMEM;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
                status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_skbs(rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        spin_lock_init(&queue->lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

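/**
 * rt2x00queue_allocate - Allocate all data queues for a device
 * @rt2x00dev: Device for which the queues are allocated.
 *
 * Allocates one RX queue, ops->tx_queues TX queues, a beacon queue and,
 * when the driver requires it, an ATIM queue, all as a single array.
 * The rx/tx/bcn pointers index into that array: rx at [0], tx starting
 * at [1], and the beacon (plus optional ATIM) queue after the TX queues.
 */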
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers.
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_BE + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_BE;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}