net: sched: implement qstat helper routines
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box that
   is able to enqueue packets and to dequeue them (when the device
   is ready to send something), in an order and at times determined
   by the algorithm hidden inside it.

   qdiscs fall into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a
   form more intelligible to the kernel, to perform sanity checks
   and the part of the work that is common to all qdiscs, and to
   provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.
   (A minimal sketch of this contract follows this comment.)

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by policing.
     Expected action: back off or report an error to real-time applications.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers and counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
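
/* Illustrative sketch (editor's addition, not part of the upstream file):
 * a minimal FIFO qdisc written against the enqueue/dequeue contract
 * described above. It assumes only the standard helpers from
 * <net/sch_generic.h> of this era (qdisc_enqueue_tail, qdisc_dequeue_head,
 * qdisc_drop); the example_* function names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* 0 == success */

	/* This very packet is dropped: NET_XMIT_DROP tells the caller
	 * not to back off but to wait until the queue clears.
	 */
	return qdisc_drop(skb, sch);
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* May return NULL even when packets are queued elsewhere; the
	 * queue is known to be empty only when sch->q.qlen == 0.
	 */
	return qdisc_dequeue_head(sch);
}
#endif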

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get the default qdisc, if one is not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set the new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop the lock and try to load the module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set the new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (the root qdisc, all its children, children of children, etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* In older iproute2 versions the linklayer setting was not transferred,
 * and the rate-table lookup system has been dropped from the kernel.
 * To stay backward compatible with older iproute2 tc utilities, we
 * detect the linklayer setting by checking whether the rate table was
 * modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to 48
 * bytes, so some table entries will contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so starting
 * from the mpu, we find the low and high table entries for mapping this
 * cell. If these entries contain the same value, then the rate table has
 * been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, computing the table entry one below
 * it, and comparing the two.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
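
/* Context (editor's addition): __qdisc_qstats_drop() above is one of the
 * qstat helper routines this commit introduces in <net/sch_generic.h>.
 * A sketch of its shape, assuming the upstream definition:
 *
 *	static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
 *	{
 *		sch->qstats.drops += count;
 *	}
 *
 * i.e. it simply bumps the qdisc's drop counter by "count", so callers
 * no longer touch sch->qstats fields directly.
 */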

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach) {
			new->ops->attach(new);
			num_q = 0;
		}

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module, we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will call qdisc_lookup_ops() again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				alloc_percpu(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	/*
	 * Any broken qdiscs that would require an ops->reset() here?
	 * The qdisc was never in action, so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be the default qdisc; ignore it. */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and we have a
				 * choice: either to change it or to
				 * create/graft a new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both the CREATE and REPLACE flags are
				 * set.
				 *
				 * 2. If EXCL is set, the requestor wanted
				 * to say that the qdisc tcm_handle is not
				 * expected to exist, so we choose
				 * create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of a hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND which does not match the
				 * existing one. (See the worked example
				 * below.)
				 */
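				/* Worked example (editor's addition,
				 * illustrative): "tc qdisc replace ..."
				 * arrives with NLM_F_CREATE|NLM_F_REPLACE
				 * set. If the KIND given differs from the
				 * attached qdisc's kind, the test below
				 * selects create/graft; with a matching
				 * KIND we fall through and change the
				 * existing qdisc in place.
				 */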
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q))
		cpu_bstats = q->cpu_bstats;

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/



static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear (the class is exactly X:Y).
	   handle == X:0	 - root class.

	   (A worked example of the handle macros follows this comment.)
	 */
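
	/* Worked example (editor's addition, illustrative), using the
	 * standard TC_H_* macros from <linux/pkt_sched.h>: the class
	 * "1:11" (tc prints handles in hex) is the 32-bit handle
	 *
	 *	u32 h = TC_H_MAKE(0x10000U, 0x11);
	 *
	 * so TC_H_MAJ(h) == 0x00010000 (the qdisc "1:") and
	 * TC_H_MIN(h) == 0x11 (the minor part).
	 */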

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle, consistent with
		 * both the parent and the child.
		 *
		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol, and asks
 * specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);

		if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *otp = tp;
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
					       tp->q->ops->id,
					       tp->prio & 0xffff,
					       ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}
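
/* Example (editor's addition, illustrative): with the usual PSCHED_SHIFT
 * of 6 and a 1 ns hrtimer resolution, "cat /proc/net/psched" typically
 * prints
 *
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. 1000 ns per microsecond, 64 ns per psched tick, and a 1 GHz
 * reference clock; the exact values depend on the kernel configuration.
 */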

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);