mac80211: reorder some transmit handlers
net/mac80211/wme.c
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup.
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
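
/*
 * Note (illustrative reading of the table above): the index is the 802.1d
 * user priority carried in skb->priority, the value is the transmit queue
 * handed to the hardware, with queue 0 being the highest-priority (voice)
 * queue.  For example, priorities 6 and 7 map to queue 0 (AC_VO), while
 * priorities 1 and 2 map to queue 3 (AC_BK).
 */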

struct ieee80211_sched_data
{
        unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
        struct tcf_proto *filter_list;
        struct Qdisc *queues[QD_MAX_QUEUES];
        struct sk_buff_head requeued[QD_MAX_QUEUES];
};

static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
        struct iphdr *ip;
        int dscp;
        int offset;

        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct tcf_result res = { -1, 0 };

        /* if there is a user set filter list, call out to that */
        if (q->filter_list) {
                tc_classify(skb, q->filter_list, &res);
                if (res.class != -1)
                        return res.class;
        }

        /* skb->priority values from 256->263 are magic values to
         * directly indicate a specific 802.1d priority.
         * This is used to allow 802.1d priority to be passed directly in
         * from VLAN tags, etc. */
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;

        /* check there is a valid IP header present */
        offset = ieee80211_get_hdrlen_from_skb(skb);
        if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
            memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
                return 0;

        ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

        dscp = ip->tos & 0xfc;
        if (dscp & 0x1c)
                return 0;
        return dscp >> 5;
}
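
/*
 * Worked example (assumed traffic, not taken from this file): an IPv4
 * packet with ToS 0xc0 (DSCP CS6) has none of the low DSCP bits set, so
 * classify_1d() returns 0xc0 >> 5 = 6 (voice); ToS 0xb8 (DSCP EF) trips
 * the (dscp & 0x1c) check and falls back to 0 (best effort).  A frame
 * handed down with skb->priority = 261 takes the 256..263 shortcut and
 * is treated as 802.1d priority 5.
 */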


static inline int wme_downgrade_ac(struct sk_buff *skb)
{
        switch (skb->priority) {
        case 6:
        case 7:
                skb->priority = 5; /* VO -> VI */
                return 0;
        case 4:
        case 5:
                skb->priority = 3; /* VI -> BE */
                return 0;
        case 0:
        case 3:
                skb->priority = 2; /* BE -> BK */
                return 0;
        default:
                return -1;
        }
}
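
/*
 * Background: classify80211() below calls this in a loop whenever the AP
 * requires admission control for the current priority (local->wmm_acm),
 * stepping VO -> VI -> BE -> BK.  Once the frame is already background
 * traffic (priorities 1 and 2) there is nothing left to downgrade to and
 * the -1 return causes the frame to be dropped.
 */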


/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        unsigned short fc = le16_to_cpu(hdr->frame_control);
        int qos;

        /* see if frame is data or non data frame */
        if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
                /* management frames go on AC_VO queue, but are sent
                 * without QoS control fields */
                return 0;
        }

        if (0 /* injected */) {
                /* use AC from radiotap */
        }

        /* is this a QoS frame? */
        qos = fc & IEEE80211_STYPE_QOS_DATA;

        if (!qos) {
                skb->priority = 0; /* required for correct WPA/11i MIC */
                return ieee802_1d_to_ac[skb->priority];
        }

        /* use the data classifier to determine what 802.1d tag the
         * data frame has */
        skb->priority = classify_1d(skb, qd);

        /* in case we are a client verify acm is not set for this ac */
        while (unlikely(local->wmm_acm & BIT(skb->priority))) {
                if (wme_downgrade_ac(skb)) {
                        /* No AC with lower priority has acm=0, drop packet. */
                        return -1;
                }
        }

        /* look up which queue to use for frames with this 1d tag */
        return ieee802_1d_to_ac[skb->priority];
}
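
/*
 * Return value recap: 0..3 selects one of the AC queues (0 = AC_VO),
 * -1 means the frame could not be admitted and should be dropped.
 * wme_qdiscop_enqueue() below clamps anything >= hw.queues to the last
 * available hardware queue before using it.
 */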


static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc *qd)
{
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_tx_packet_data *pkt_data =
                (struct ieee80211_tx_packet_data *) skb->cb;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        unsigned short fc = le16_to_cpu(hdr->frame_control);
        struct Qdisc *qdisc;
        struct sta_info *sta;
        int err, queue;
        u8 tid;

        if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
                queue = pkt_data->queue;
                rcu_read_lock();
                sta = sta_info_get(local, hdr->addr1);
                tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];
                        if ((ampdu_queue < QD_NUM(hw)) &&
                            test_bit(ampdu_queue, q->qdisc_pool)) {
                                queue = ampdu_queue;
                                pkt_data->flags |= IEEE80211_TXPD_AMPDU;
                        } else {
                                pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
                        }
                }
                rcu_read_unlock();
                skb_queue_tail(&q->requeued[queue], skb);
                qd->q.qlen++;
                return 0;
        }

        queue = classify80211(skb, qd);

        if (unlikely(queue >= local->hw.queues))
                queue = local->hw.queues - 1;

        /* now we know the 1d priority, fill in the QoS header if there is one */
        if (WLAN_FC_IS_QOS_DATA(fc)) {
                u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
                u8 ack_policy = 0;
                tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
                if (local->wifi_wme_noack_test)
                        ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
                                        QOS_CONTROL_ACK_POLICY_SHIFT;
                /* qos header is 2 bytes, second reserved */
                *p = ack_policy | tid;
                p++;
                *p = 0;

                rcu_read_lock();

                sta = sta_info_get(local, hdr->addr1);
                if (sta) {
                        int ampdu_queue = sta->tid_to_tx_q[tid];
                        if ((ampdu_queue < QD_NUM(hw)) &&
                            test_bit(ampdu_queue, q->qdisc_pool)) {
                                queue = ampdu_queue;
                                pkt_data->flags |= IEEE80211_TXPD_AMPDU;
                        } else {
                                pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
                        }
                }

                rcu_read_unlock();
        }

        if (unlikely(queue < 0)) {
                kfree_skb(skb);
                err = NET_XMIT_DROP;
        } else {
                tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
                pkt_data->queue = (unsigned int) queue;
                qdisc = q->queues[queue];
                err = qdisc->enqueue(skb, qdisc);
                if (err == NET_XMIT_SUCCESS) {
                        qd->q.qlen++;
                        qd->bstats.bytes += skb->len;
                        qd->bstats.packets++;
                        return NET_XMIT_SUCCESS;
                }
        }
        qd->qstats.drops++;
        return err;
}
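
/*
 * Note on the QoS header fill-in above: for QoS data frames, p points at
 * the 2-byte QoS Control field at the end of the 802.11 header; the first
 * byte carries the TID plus the ack policy (set to "no ack" when the
 * wifi_wme_noack_test knob is enabled), the second byte is left zero.
 * Frames re-entering with IEEE80211_TXPD_REQUEUE skip classification and
 * go straight onto the per-queue requeued list.
 */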


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non 0 - it shouldn't ever do that. Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc *qd)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_tx_packet_data *pkt_data =
                (struct ieee80211_tx_packet_data *) skb->cb;
        struct Qdisc *qdisc;
        int err;

        /* we recorded which queue to use earlier! */
        qdisc = q->queues[pkt_data->queue];

        if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
                qd->q.qlen++;
                return 0;
        }
        qd->qstats.drops++;
        return err;
}


static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc *qd)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct net_device *dev = qd->dev;
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        struct sk_buff *skb;
        struct Qdisc *qdisc;
        int queue;

        /* check all the h/w queues in numeric/priority order */
        for (queue = 0; queue < QD_NUM(hw); queue++) {
                /* see if there is room in this hardware queue */
                if ((test_bit(IEEE80211_LINK_STATE_XOFF,
                              &local->state[queue])) ||
                    (test_bit(IEEE80211_LINK_STATE_PENDING,
                              &local->state[queue])) ||
                    (!test_bit(queue, q->qdisc_pool)))
                        continue;

                /* there is space - try and get a frame */
                skb = skb_dequeue(&q->requeued[queue]);
                if (skb) {
                        qd->q.qlen--;
                        return skb;
                }

                qdisc = q->queues[queue];
                skb = qdisc->dequeue(qdisc);
                if (skb) {
                        qd->q.qlen--;
                        return skb;
                }
        }
        /* returning a NULL here when all the h/w queues are full means we
         * never need to call netif_stop_queue in the driver */
        return NULL;
}
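
/*
 * Dequeue order recap: hardware queues are scanned from 0 (highest
 * priority) upwards; a queue is skipped while the driver has it stopped
 * (XOFF/PENDING link state) or while it is not marked in qdisc_pool
 * (an aggregation queue that has not been handed out).  Requeued frames
 * are drained before the child qdisc is asked for new ones.
 */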


static void wme_qdiscop_reset(struct Qdisc *qd)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;

        /* QUESTION: should we have some hardware flush functionality here? */

        for (queue = 0; queue < QD_NUM(hw); queue++) {
                skb_queue_purge(&q->requeued[queue]);
                qdisc_reset(q->queues[queue]);
        }
        qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc *qd)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;

        tcf_destroy_chain(q->filter_list);
        q->filter_list = NULL;

        for (queue = 0; queue < QD_NUM(hw); queue++) {
                skb_queue_purge(&q->requeued[queue]);
                qdisc_destroy(q->queues[queue]);
                q->queues[queue] = &noop_qdisc;
        }
}


/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
        return 0;
}


/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct net_device *dev = qd->dev;
        struct ieee80211_local *local;
        struct ieee80211_hw *hw;
        int err = 0, i;

        /* check that device is a mac80211 device */
        if (!dev->ieee80211_ptr ||
            dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
                return -EINVAL;

        local = wdev_priv(dev->ieee80211_ptr);
        hw = &local->hw;

        /* only allow on master dev */
        if (dev != local->mdev)
                return -EINVAL;

        /* ensure that we are root qdisc */
        if (qd->parent != TC_H_ROOT)
                return -EINVAL;

        if (qd->flags & TCQ_F_INGRESS)
                return -EINVAL;

        /* if options were passed in, set them */
        if (opt)
                err = wme_qdiscop_tune(qd, opt);

        /* create child queues */
        for (i = 0; i < QD_NUM(hw); i++) {
                skb_queue_head_init(&q->requeued[i]);
                q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
                                                 qd->handle);
                if (!q->queues[i]) {
                        q->queues[i] = &noop_qdisc;
                        printk(KERN_ERR "%s child qdisc %i creation failed\n",
                               dev->name, i);
                }
        }

        /* non-aggregation queues: reserve/mark as used */
        for (i = 0; i < local->hw.queues; i++)
                set_bit(i, q->qdisc_pool);

        return err;
}
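
/*
 * Setup recap: every hardware queue (including the aggregation queues)
 * gets a default pfifo child qdisc, but only the first hw.queues entries
 * are marked used in qdisc_pool; the remaining bits stay clear until
 * ieee80211_ht_agg_queue_add() below claims one for a TID.
 */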

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
        return -1;
}


static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
                             struct Qdisc *new, struct Qdisc **old)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = arg - 1;

        if (queue >= QD_NUM(hw))
                return -EINVAL;

        if (!new)
                new = &noop_qdisc;

        sch_tree_lock(qd);
        *old = q->queues[queue];
        q->queues[queue] = new;
        qdisc_reset(*old);
        sch_tree_unlock(qd);

        return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = arg - 1;

        if (queue >= QD_NUM(hw))
                return NULL;

        return q->queues[queue];
}


static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = TC_H_MIN(classid);

        if (queue - 1 >= QD_NUM(hw))
                return 0;

        return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
                                      u32 classid)
{
        return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
                              struct nlattr **tca, unsigned long *arg)
{
        unsigned long cl = *arg;
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;

        if (cl - 1 > QD_NUM(hw))
                return -ENOENT;

        /* TODO: put code to program hardware queue parameters here,
         * to allow programming from tc command line */

        return 0;
}


/* we don't support deleting hardware queues
 * when we add WMM-SA support - TSPECs may be deleted here */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;

        if (cl - 1 > QD_NUM(hw))
                return -ENOENT;
        return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
                                  struct sk_buff *skb, struct tcmsg *tcm)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;

        if (cl - 1 > QD_NUM(hw))
                return -ENOENT;
        tcm->tcm_handle = TC_H_MIN(cl);
        tcm->tcm_parent = qd->handle;
        tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
        return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;

        if (arg->stop)
                return;

        for (queue = 0; queue < QD_NUM(hw); queue++) {
                if (arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                /* we should return classids for our internal queues here
                 * as well as the external ones */
                if (arg->fn(qd, queue + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}


static struct tcf_proto **wme_classop_find_tcf(struct Qdisc *qd,
                                               unsigned long cl)
{
        struct ieee80211_sched_data *q = qdisc_priv(qd);

        if (cl)
                return NULL;

        return &q->filter_list;
}


/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
        .graft = wme_classop_graft,
        .leaf = wme_classop_leaf,

        .get = wme_classop_get,
        .put = wme_classop_put,
        .change = wme_classop_change,
        .delete = wme_classop_delete,
        .walk = wme_classop_walk,

        .tcf_chain = wme_classop_find_tcf,
        .bind_tcf = wme_classop_bind,
        .unbind_tcf = wme_classop_put,

        .dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
        .next = NULL,
        .cl_ops = &class_ops,
        .id = "ieee80211",
        .priv_size = sizeof(struct ieee80211_sched_data),

        .enqueue = wme_qdiscop_enqueue,
        .dequeue = wme_qdiscop_dequeue,
        .requeue = wme_qdiscop_requeue,
        .drop = NULL, /* drop not needed since we are always the root qdisc */

        .init = wme_qdiscop_init,
        .reset = wme_qdiscop_reset,
        .destroy = wme_qdiscop_destroy,
        .change = wme_qdiscop_tune,

        .dump = wme_qdiscop_dump,
};
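
/*
 * The "ieee80211" id above is the qdisc kind reported by tc once the ops
 * have been registered by ieee80211_wme_register() and attached to the
 * master device by ieee80211_install_qdisc() below.
 */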


void ieee80211_install_qdisc(struct net_device *dev)
{
        struct Qdisc *qdisc;

        qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
        if (!qdisc) {
                printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
                return;
        }

        /* same handle as would be allocated by qdisc_alloc_handle() */
        qdisc->handle = 0x80010000;

        qdisc_lock_tree(dev);
        list_add_tail(&qdisc->list, &dev->qdisc_list);
        dev->qdisc_sleeping = qdisc;
        qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
        return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
        return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
        unregister_qdisc(&wme_qdisc_ops);
}

int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                               struct sta_info *sta, u16 tid)
{
        int i;
        struct ieee80211_sched_data *q =
                qdisc_priv(local->mdev->qdisc_sleeping);
        DECLARE_MAC_BUF(mac);

        /* prepare the filter and save it for the SW queue
         * matching the received HW queue */

        if (!local->hw.ampdu_queues)
                return -EPERM;

        /* try to get a Qdisc from the pool */
        for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
                if (!test_and_set_bit(i, q->qdisc_pool)) {
                        ieee80211_stop_queue(local_to_hw(local), i);
                        sta->tid_to_tx_q[tid] = i;

                        /* If there are already pending packets on this TID,
                         * we first need to drain them on the previous queue,
                         * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
                        if (net_ratelimit())
                                printk(KERN_DEBUG "allocated aggregation queue"
                                        " %d tid %d addr %s pool=0x%lX\n",
                                        i, tid, print_mac(mac, sta->addr),
                                        q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
                        return 0;
                }

        return -EAGAIN;
}
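
/*
 * Usage sketch: the aggregation setup path calls this with the station
 * and TID being aggregated; on success the chosen queue index is stored
 * in sta->tid_to_tx_q[tid].  The queue is stopped here via
 * ieee80211_stop_queue() and is expected to be woken again later in the
 * aggregation setup path (not shown in this file).
 */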

/*
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                                   struct sta_info *sta, u16 tid,
                                   u8 requeue)
{
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_sched_data *q =
                qdisc_priv(local->mdev->qdisc_sleeping);
        int agg_queue = sta->tid_to_tx_q[tid];

        /* return the qdisc to the pool */
        clear_bit(agg_queue, q->qdisc_pool);
        sta->tid_to_tx_q[tid] = QD_NUM(hw);

        if (requeue)
                ieee80211_requeue(local, agg_queue);
        else
                q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
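
/*
 * Teardown note: with requeue set, frames still sitting on the
 * aggregation queue are pushed back through the root qdisc by
 * ieee80211_requeue() so they get reclassified onto a normal AC queue;
 * otherwise the child qdisc is simply reset and the frames are dropped.
 */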

void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
        struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
        struct ieee80211_sched_data *q = qdisc_priv(root_qd);
        struct Qdisc *qdisc = q->queues[queue];
        struct sk_buff *skb = NULL;
        u32 len;

        if (!qdisc || !qdisc->dequeue)
                return;

        printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
        for (len = qdisc->q.qlen; len > 0; len--) {
                skb = qdisc->dequeue(qdisc);
                root_qd->q.qlen--;
                /* packet will be classified again and
                 * skb->packet_data->queue will be overridden if needed */
                if (skb)
                        wme_qdiscop_enqueue(skb, root_qd);
        }
}