Commit | Line | Data |
---|---|---|
92651940 AD |
1 | /* |
2 | * Copyright (c) 2008, Intel Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
c057b190 | 14 | * this program; if not, see <http://www.gnu.org/licenses/>. |
92651940 AD |
15 | * |
16 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> | |
17 | */ | |
18 | ||
19 | #include <linux/module.h> | |
5a0e3ad6 | 20 | #include <linux/slab.h> |
92651940 AD |
21 | #include <linux/types.h> |
22 | #include <linux/kernel.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/skbuff.h> | |
26 | #include <net/netlink.h> | |
27 | #include <net/pkt_sched.h> | |
28 | ||
29 | ||
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;		/* number of bands currently active */
	u16 max_bands;		/* size of queues[]; device num_tx_queues at init */
	u16 curband;		/* last band serviced; round-robin cursor */
	struct tcf_proto __rcu *filter_list;	/* attached classifier chain (RCU) */
	struct Qdisc **queues;	/* one child qdisc per band */
};
37 | ||
38 | ||
/* Pick the child qdisc for an skb.
 *
 * The classifier chain runs first so CLS_ACT verdicts are honoured
 * (STOLEN/QUEUED/SHOT consume the packet and return NULL with *qerr set);
 * the band itself comes from the skb's queue mapping, clamped to band 0
 * when out of range.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through - stolen/queued also return no qdisc */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	/* Out-of-range queue mappings fall back to band 0. */
	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
66 | ||
/* Enqueue an skb onto the child qdisc selected by multiq_classify().
 *
 * Returns NET_XMIT_SUCCESS on success, otherwise the child's error code.
 * Drop statistics are only bumped for real drops (net_xmit_drop_count()
 * filters out "stolen" style results).
 */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		/* Classifier consumed the packet; only count a drop when
		 * it was not an explicit bypass/steal verdict.
		 */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
93 | ||
92651940 AD |
/* Dequeue the next packet, round-robining across bands for fairness.
 *
 * q->curband is advanced before each attempt so successive calls start
 * from the band after the one last serviced.  Bands whose hardware tx
 * queue is stopped are skipped to avoid head-of-line blocking.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
124 | ||
8e3af978 JP |
/* Peek at the packet multiq_dequeue() would return next.
 *
 * Mirrors the dequeue scan but works on a local copy of the band
 * cursor, so qdisc state is left untouched.
 */
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;	/* local copy: peek must not advance */
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}
153 | ||
92651940 AD |
154 | static unsigned int multiq_drop(struct Qdisc *sch) |
155 | { | |
156 | struct multiq_sched_data *q = qdisc_priv(sch); | |
157 | int band; | |
158 | unsigned int len; | |
159 | struct Qdisc *qdisc; | |
160 | ||
cc7ec456 | 161 | for (band = q->bands - 1; band >= 0; band--) { |
92651940 AD |
162 | qdisc = q->queues[band]; |
163 | if (qdisc->ops->drop) { | |
164 | len = qdisc->ops->drop(qdisc); | |
165 | if (len != 0) { | |
166 | sch->q.qlen--; | |
167 | return len; | |
168 | } | |
169 | } | |
170 | } | |
171 | return 0; | |
172 | } | |
173 | ||
174 | ||
175 | static void | |
176 | multiq_reset(struct Qdisc *sch) | |
177 | { | |
178 | u16 band; | |
179 | struct multiq_sched_data *q = qdisc_priv(sch); | |
180 | ||
181 | for (band = 0; band < q->bands; band++) | |
182 | qdisc_reset(q->queues[band]); | |
183 | sch->q.qlen = 0; | |
184 | q->curband = 0; | |
185 | } | |
186 | ||
187 | static void | |
188 | multiq_destroy(struct Qdisc *sch) | |
189 | { | |
190 | int band; | |
191 | struct multiq_sched_data *q = qdisc_priv(sch); | |
192 | ||
193 | tcf_destroy_chain(&q->filter_list); | |
194 | for (band = 0; band < q->bands; band++) | |
195 | qdisc_destroy(q->queues[band]); | |
196 | ||
197 | kfree(q->queues); | |
198 | } | |
199 | ||
/* Handle a change/init request: resize the band set to match the device's
 * real_num_tx_queues.
 *
 * Phase 1 (under sch_tree_lock): shrink - detach and destroy children for
 * bands beyond the new count, replacing each with noop_qdisc first so the
 * tree never holds a dangling pointer.
 * Phase 2 (lock dropped for GFP allocation): grow - create a default pfifo
 * child for every band still parked on noop_qdisc, re-taking the lock only
 * to splice each new child in.
 *
 * Returns 0 on success, -EOPNOTSUPP for non-multiqueue devices, or
 * -EINVAL for a short netlink attribute.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* The band count always tracks the device; user input is ignored. */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			/* Account the removed child's packets before destroy. */
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			/* Allocation must happen outside sch_tree_lock. */
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
253 | ||
/* Initialise the qdisc: allocate one slot per device tx queue, park every
 * band on noop_qdisc, then delegate to multiq_tune() to build the real
 * children.  On tune failure the queues array is freed here, so ->destroy
 * is never left with a half-built state to clean up.
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}
279 | ||
280 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) | |
281 | { | |
282 | struct multiq_sched_data *q = qdisc_priv(sch); | |
283 | unsigned char *b = skb_tail_pointer(skb); | |
284 | struct tc_multiq_qopt opt; | |
285 | ||
286 | opt.bands = q->bands; | |
287 | opt.max_bands = q->max_bands; | |
288 | ||
1b34ec43 DM |
289 | if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) |
290 | goto nla_put_failure; | |
92651940 AD |
291 | |
292 | return skb->len; | |
293 | ||
294 | nla_put_failure: | |
295 | nlmsg_trim(skb, b); | |
296 | return -1; | |
297 | } | |
298 | ||
299 | static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |
300 | struct Qdisc **old) | |
301 | { | |
302 | struct multiq_sched_data *q = qdisc_priv(sch); | |
303 | unsigned long band = arg - 1; | |
304 | ||
92651940 AD |
305 | if (new == NULL) |
306 | new = &noop_qdisc; | |
307 | ||
86a7996c | 308 | *old = qdisc_replace(sch, new, &q->queues[band]); |
92651940 AD |
309 | return 0; |
310 | } | |
311 | ||
312 | static struct Qdisc * | |
313 | multiq_leaf(struct Qdisc *sch, unsigned long arg) | |
314 | { | |
315 | struct multiq_sched_data *q = qdisc_priv(sch); | |
316 | unsigned long band = arg - 1; | |
317 | ||
92651940 AD |
318 | return q->queues[band]; |
319 | } | |
320 | ||
321 | static unsigned long multiq_get(struct Qdisc *sch, u32 classid) | |
322 | { | |
323 | struct multiq_sched_data *q = qdisc_priv(sch); | |
324 | unsigned long band = TC_H_MIN(classid); | |
325 | ||
326 | if (band - 1 >= q->bands) | |
327 | return 0; | |
328 | return band; | |
329 | } | |
330 | ||
331 | static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, | |
332 | u32 classid) | |
333 | { | |
334 | return multiq_get(sch, classid); | |
335 | } | |
336 | ||
337 | ||
/* Classes are implicit (one per band) and not refcounted, so releasing
 * a class reference is a no-op.
 */
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}
341 | ||
92651940 AD |
/* Fill in the tcmsg for class `cl`: the class's minor handle and the
 * handle of the child qdisc currently attached to that band.
 */
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;	/* cl is 1-based */
	return 0;
}
351 | ||
/* Copy the byte/packet and queue statistics of the band's child qdisc
 * into the dump.  Returns -1 if either copy overflows the message.
 */
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];	/* cl is 1-based */
	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}
365 | ||
/* Iterate over all classes (bands), honouring the walker protocol:
 * skip the first arg->skip entries, stop when the callback asks to,
 * and keep arg->count in step throughout.
 */
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* Class handles are band + 1 (0 means "no class"). */
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
386 | ||
25d8c0d5 JF |
387 | static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, |
388 | unsigned long cl) | |
92651940 AD |
389 | { |
390 | struct multiq_sched_data *q = qdisc_priv(sch); | |
391 | ||
392 | if (cl) | |
393 | return NULL; | |
394 | return &q->filter_list; | |
395 | } | |
396 | ||
/* Class-level operations: each band is exposed as one class. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.get		=	multiq_get,
	.put		=	multiq_put,
	.walk		=	multiq_walk,
	.tcf_chain	=	multiq_find_tcf,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_put,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};
409 | ||
/* Top-level qdisc operations registered with the packet scheduler core. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.drop		=	multiq_drop,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};
426 | ||
/* Module entry point: register the "multiq" qdisc with the scheduler. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

/* Module exit point: unregister the qdisc again. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");