Merge commit 'gcl/next' into next
[deliverable/linux.git] / net / mac80211 / wme.c
CommitLineData
f0706e82
JB
1/*
2 * Copyright 2004, Instant802 Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11#include <linux/module.h>
12#include <linux/if_arp.h>
13#include <linux/types.h>
14#include <net/ip.h>
15#include <net/pkt_sched.h>
16
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
/* Default mapping in classifier to work with default
 * queue setup.
 *
 * Indexed by 802.1d user priority (0..7); value is the WMM access
 * category queue: 0 = VO, 1 = VI, 2 = BE, 3 = BK (see the users of
 * this table below, e.g. classify80211()).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* LLC/SNAP header for IPv4: DSAP/SSAP 0xAA, control 0x03, OUI 00:00:00,
 * ethertype 0x0800 (ETH_P_IP). NOTE(review): not referenced anywhere in
 * this file as visible here — possibly left over; confirm before removal. */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

51cb6db0
DM
28/* Given a data frame determine the 802.1p/1d tag to use. */
29static unsigned int classify_1d(struct sk_buff *skb)
f0706e82 30{
51cb6db0 31 unsigned int dscp;
f0706e82
JB
32
33 /* skb->priority values from 256->263 are magic values to
51cb6db0
DM
34 * directly indicate a specific 802.1d priority. This is used
35 * to allow 802.1d priority to be passed directly in from VLAN
36 * tags, etc.
37 */
f0706e82
JB
38 if (skb->priority >= 256 && skb->priority <= 263)
39 return skb->priority - 256;
40
51cb6db0 41 switch (skb->protocol) {
60678040 42 case htons(ETH_P_IP):
51cb6db0
DM
43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break;
f0706e82 45
51cb6db0
DM
46 default:
47 return 0;
48 }
f0706e82 49
f0706e82
JB
50 return dscp >> 5;
51}
52
53
51cb6db0 54static int wme_downgrade_ac(struct sk_buff *skb)
f0706e82
JB
55{
56 switch (skb->priority) {
57 case 6:
58 case 7:
59 skb->priority = 5; /* VO -> VI */
60 return 0;
61 case 4:
62 case 5:
63 skb->priority = 3; /* VI -> BE */
64 return 0;
65 case 0:
66 case 3:
67 skb->priority = 2; /* BE -> BK */
68 return 0;
69 default:
70 return -1;
71 }
72}
73
74
51cb6db0 75/* Indicate which queue to use. */
b4a4bf5d 76static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
f0706e82 77{
f0706e82 78 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
f0706e82 79
002aaf4e 80 if (!ieee80211_is_data(hdr->frame_control)) {
f0706e82
JB
81 /* management frames go on AC_VO queue, but are sent
82 * without QoS control fields */
e100bb64 83 return 0;
f0706e82
JB
84 }
85
f9d540ee
JB
86 if (0 /* injected */) {
87 /* use AC from radiotap */
f0706e82
JB
88 }
89
002aaf4e 90 if (!ieee80211_is_data_qos(hdr->frame_control)) {
f0706e82
JB
91 skb->priority = 0; /* required for correct WPA/11i MIC */
92 return ieee802_1d_to_ac[skb->priority];
93 }
94
95 /* use the data classifier to determine what 802.1d tag the
3c3b00ca 96 * data frame has */
51cb6db0 97 skb->priority = classify_1d(skb);
f0706e82 98
3c3b00ca 99 /* in case we are a client verify acm is not set for this ac */
f0706e82
JB
100 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
101 if (wme_downgrade_ac(skb)) {
51cb6db0
DM
102 /* The old code would drop the packet in this
103 * case.
104 */
105 return 0;
f0706e82
JB
106 }
107 }
108
109 /* look up which queue to use for frames with this 1d tag */
110 return ieee802_1d_to_ac[skb->priority];
111}
112
51cb6db0 113u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
f0706e82 114{
b4a4bf5d
JB
115 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
116 struct ieee80211_local *local = mpriv->local;
8b30b1fe 117 struct ieee80211_hw *hw = &local->hw;
f0706e82 118 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
9e723492 119 struct sta_info *sta;
51cb6db0 120 u16 queue;
9e723492 121 u8 tid;
f0706e82 122
b4a4bf5d 123 queue = classify80211(local, skb);
51cb6db0
DM
124 if (unlikely(queue >= local->hw.queues))
125 queue = local->hw.queues - 1;
126
8b30b1fe
S
127 if (skb->requeue) {
128 if (!hw->ampdu_queues)
129 return queue;
130
d0709a65 131 rcu_read_lock();
9e723492 132 sta = sta_info_get(local, hdr->addr1);
238f74a2 133 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
9e723492
RR
134 if (sta) {
135 int ampdu_queue = sta->tid_to_tx_q[tid];
51cb6db0
DM
136
137 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
8b30b1fe 138 test_bit(ampdu_queue, local->queue_pool))
9e723492 139 queue = ampdu_queue;
9e723492 140 }
d0709a65 141 rcu_read_unlock();
f0706e82 142
51cb6db0
DM
143 return queue;
144 }
e100bb64 145
51cb6db0
DM
146 /* Now we know the 1d priority, fill in the QoS header if
147 * there is one.
f0706e82 148 */
002aaf4e
HH
149 if (ieee80211_is_data_qos(hdr->frame_control)) {
150 u8 *p = ieee80211_get_qos_ctl(hdr);
9e723492 151 u8 ack_policy = 0;
238f74a2 152 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
f0706e82 153 if (local->wifi_wme_noack_test)
9e723492 154 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
f0706e82
JB
155 QOS_CONTROL_ACK_POLICY_SHIFT;
156 /* qos header is 2 bytes, second reserved */
002aaf4e 157 *p++ = ack_policy | tid;
f0706e82 158 *p = 0;
9e723492 159
8b30b1fe
S
160 if (!hw->ampdu_queues)
161 return queue;
162
d0709a65
JB
163 rcu_read_lock();
164
9e723492
RR
165 sta = sta_info_get(local, hdr->addr1);
166 if (sta) {
167 int ampdu_queue = sta->tid_to_tx_q[tid];
51cb6db0
DM
168
169 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
8b30b1fe 170 test_bit(ampdu_queue, local->queue_pool))
9e723492 171 queue = ampdu_queue;
9e723492 172 }
d0709a65
JB
173
174 rcu_read_unlock();
f0706e82
JB
175 }
176
f0706e82
JB
177 return queue;
178}
179
/* Allocate a software aggregation (A-MPDU) queue for the <sta, tid> pair.
 *
 * Returns 0 on success (queue bit taken from the pool, queue stopped,
 * tid_to_tx_q updated), -EPERM when the hardware has no aggregation
 * queues, -EAGAIN when all pool queues are in use.
 *
 * NOTE: currently disabled — the unconditional early return below makes
 * everything after it intentionally unreachable (see XXX comment).
 */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* XXX: currently broken due to cb/requeue use */
	return -EPERM;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %pM pool=0x%lX\n",
					i, tid, sta->sta.addr,
					local->queue_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
216
/**
 * Release the aggregation queue previously allocated for <sta, tid>.
 *
 * The queue's bit is returned to the pool and tid_to_tx_q is reset to
 * the out-of-range sentinel ieee80211_num_queues(hw). If @requeue is
 * set, pending frames are redistributed via ieee80211_requeue();
 * otherwise the queue's qdisc is reset (frames dropped).
 *
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	int agg_queue = sta->tid_to_tx_q[tid];
	struct ieee80211_hw *hw = &local->hw;

	/* return the qdisc to the pool */
	clear_bit(agg_queue, local->queue_pool);
	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

	if (requeue) {
		ieee80211_requeue(local, agg_queue);
	} else {
		struct netdev_queue *txq;
		spinlock_t *root_lock;
		struct Qdisc *q;

		txq = netdev_get_tx_queue(local->mdev, agg_queue);
		q = rcu_dereference(txq->qdisc);
		root_lock = qdisc_lock(q);

		/* drop anything still queued on the aggregation queue */
		spin_lock_bh(root_lock);
		qdisc_reset(q);
		spin_unlock_bh(root_lock);
	}
}
247
/* Drain every frame pending on @queue's qdisc and re-enqueue each one on
 * whatever queue ieee80211_select_queue() now picks for it. Used when an
 * aggregation queue is torn down and its frames must fall back to the
 * normal AC queues.
 *
 * Frames are first collected into a local list under the source qdisc's
 * root lock, then re-enqueued one by one, taking each destination
 * qdisc's root lock in turn.
 */
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();

	qdisc = rcu_dereference(txq->qdisc);
	/* nothing to drain if there is no qdisc or it cannot dequeue */
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	skb_queue_head_init(&list);

	/* phase 1: pull everything off the source qdisc */
	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	/* phase 2: reclassify and re-enqueue each frame */
	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);


		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}
This page took 0.236833 seconds and 5 git commands to generate.