1 /*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30
31 #include <net/net_namespace.h>
32 #include <net/sock.h>
33 #include <net/netlink.h>
34 #include <net/pkt_sched.h>
35
36 static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
37 struct Qdisc *old, struct Qdisc *new);
38 static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
39 struct Qdisc *q, unsigned long cl, int event);
40
41 /*
42
43 Short review.
44 -------------
45
46 This file consists of two interrelated parts:
47
48 1. the queueing discipline manager frontend.
49 2. the traffic class manager frontend.
50
51 Generally, a queueing discipline ("qdisc") is a black box,
52 which is able to enqueue packets and to dequeue them (when
53 the device is ready to send something) in an order and at times
54 determined by the algorithm hidden inside it.
55
56 qdiscs are divided into two categories:
57 - "queues", which have no internal structure visible from outside.
58 - "schedulers", which split all packets into "traffic classes",
59 using "packet classifiers" (see cls_api.c).
60
61 In turn, classes may have child qdiscs (as a rule, queues)
62 attached to them, and so on.
63
64 The goal of the routines in this file is to translate
65 the information supplied by the user in the form of handles
66 into a form more intelligible to the kernel, to perform some
67 sanity checks and the part of the work that is common to all
68 qdiscs, and to provide rtnetlink notifications.
69
70 All the real intelligent work is done inside the qdisc modules.
71
72
73
74 Every discipline has two major routines: enqueue and dequeue.
75
76 ---dequeue
77
78 dequeue usually returns an skb to send. It is allowed to return NULL,
79 but that does not mean the queue is empty; it only means that the
80 discipline does not want to send anything at this time.
81 The queue is really empty only if q->q.qlen == 0.
82 For complicated disciplines with multiple internal queues, q->q is
83 not the real packet queue, but q->q.qlen must nevertheless be valid.
84
85 ---enqueue
86
87 enqueue returns 0 if the packet was enqueued successfully.
88 If a packet (this one or another one) was dropped, it returns
89 a nonzero error code:
90 NET_XMIT_DROP - this packet was dropped.
91 Expected action: do not back off, but wait until the queue clears.
92 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
93 Expected action: back off or ignore.
94 NET_XMIT_POLICED - dropped by the policer.
95 Expected action: back off or report an error to real-time applications.
96
97 Auxiliary routines:
98
99 ---requeue
100
101 requeues a packet that was dequeued earlier. It is used for non-standard
102 or just buggy devices, which can defer output even if dev->tbusy == 0.
103
104 ---reset
105
106 returns the qdisc to its initial state: purges all buffers, clears all
107 timers and counters (except statistics), etc.
108
109 ---init
110
111 initializes a newly created qdisc.
112
113 ---destroy
114
115 destroys the resources allocated by init and during the lifetime of the qdisc.
116
117 ---change
118
119 changes qdisc parameters.
120 */
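/*
 * A minimal sketch, for illustration only, of the enqueue/dequeue
 * contract described above: a pfifo-style discipline whose q->q is the
 * real packet queue. The example_* names are hypothetical and not part
 * of this file; a real qdisc would also enforce a queue limit and
 * account drops.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        __skb_queue_tail(&sch->q, skb);         /* keeps q->q.qlen valid */
        sch->bstats.bytes += skb->len;
        sch->bstats.packets++;
        return NET_XMIT_SUCCESS;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        /* NULL means "nothing to send right now", not "empty";
         * emptiness is q->q.qlen == 0.
         */
        return __skb_dequeue(&sch->q);
}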
121
122 /* Protects the list of registered TC modules. It is a pure SMP lock. */
123 static DEFINE_RWLOCK(qdisc_mod_lock);
124
125
126 /************************************************
127 * Queueing disciplines manipulation. *
128 ************************************************/
129
130
131 /* The list of all installed queueing disciplines. */
132
133 static struct Qdisc_ops *qdisc_base;
134
135 /* Register/unregister a queueing discipline */
136
137 int register_qdisc(struct Qdisc_ops *qops)
138 {
139 struct Qdisc_ops *q, **qp;
140 int rc = -EEXIST;
141
142 write_lock(&qdisc_mod_lock);
143 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
144 if (!strcmp(qops->id, q->id))
145 goto out;
146
147 if (qops->enqueue == NULL)
148 qops->enqueue = noop_qdisc_ops.enqueue;
149 if (qops->requeue == NULL)
150 qops->requeue = noop_qdisc_ops.requeue;
151 if (qops->dequeue == NULL)
152 qops->dequeue = noop_qdisc_ops.dequeue;
153
154 qops->next = NULL;
155 *qp = qops;
156 rc = 0;
157 out:
158 write_unlock(&qdisc_mod_lock);
159 return rc;
160 }
161 EXPORT_SYMBOL(register_qdisc);
162
163 int unregister_qdisc(struct Qdisc_ops *qops)
164 {
165 struct Qdisc_ops *q, **qp;
166 int err = -ENOENT;
167
168 write_lock(&qdisc_mod_lock);
169 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
170 if (q == qops)
171 break;
172 if (q) {
173 *qp = q->next;
174 q->next = NULL;
175 err = 0;
176 }
177 write_unlock(&qdisc_mod_lock);
178 return err;
179 }
180 EXPORT_SYMBOL(unregister_qdisc);
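/*
 * Typical registration pattern, sketched with the hypothetical stubs
 * above: a module registers its Qdisc_ops on load and unregisters them
 * on unload. Note that register_qdisc() fills any missing enqueue,
 * requeue or dequeue callback with the noop defaults.
 */
static struct Qdisc_ops example_qdisc_ops __read_mostly = {
        .id             = "example",
        .priv_size      = 0,
        .enqueue        = example_enqueue,
        .dequeue        = example_dequeue,
        .owner          = THIS_MODULE,
};

static int __init example_module_init(void)
{
        return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
        unregister_qdisc(&example_qdisc_ops);
}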
181
182 /* We know the handle. Find the qdisc among all qdiscs attached to the
183 device (the root qdisc, all its children, children of children, etc.)
184 */
185
186 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
187 {
188 struct Qdisc *q;
189
190 list_for_each_entry(q, &dev->qdisc_list, list) {
191 if (q->handle == handle)
192 return q;
193 }
194 return NULL;
195 }
196
197 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
198 {
199 unsigned long cl;
200 struct Qdisc *leaf;
201 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
202
203 if (cops == NULL)
204 return NULL;
205 cl = cops->get(p, classid);
206
207 if (cl == 0)
208 return NULL;
209 leaf = cops->leaf(p, cl);
210 cops->put(p, cl);
211 return leaf;
212 }
213
214 /* Find a queueing discipline by name */
215
216 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
217 {
218 struct Qdisc_ops *q = NULL;
219
220 if (kind) {
221 read_lock(&qdisc_mod_lock);
222 for (q = qdisc_base; q; q = q->next) {
223 if (nla_strcmp(kind, q->id) == 0) {
224 if (!try_module_get(q->owner))
225 q = NULL;
226 break;
227 }
228 }
229 read_unlock(&qdisc_mod_lock);
230 }
231 return q;
232 }
233
234 static struct qdisc_rate_table *qdisc_rtab_list;
235
236 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
237 {
238 struct qdisc_rate_table *rtab;
239
240 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
241 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
242 rtab->refcnt++;
243 return rtab;
244 }
245 }
246
247 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
248 nla_len(tab) != TC_RTAB_SIZE)
249 return NULL;
250
251 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
252 if (rtab) {
253 rtab->rate = *r;
254 rtab->refcnt = 1;
255 memcpy(rtab->data, nla_data(tab), TC_RTAB_SIZE);
256 rtab->next = qdisc_rtab_list;
257 qdisc_rtab_list = rtab;
258 }
259 return rtab;
260 }
261 EXPORT_SYMBOL(qdisc_get_rtab);
262
263 void qdisc_put_rtab(struct qdisc_rate_table *tab)
264 {
265 struct qdisc_rate_table *rtab, **rtabp;
266
267 if (!tab || --tab->refcnt)
268 return;
269
270 for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
271 if (rtab == tab) {
272 *rtabp = rtab->next;
273 kfree(rtab);
274 return;
275 }
276 }
277 }
278 EXPORT_SYMBOL(qdisc_put_rtab);
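/*
 * A sketch (with hypothetical names) of how a shaping qdisc's init path
 * typically consumes the rtab cache: take a reference for its
 * tc_ratespec, keep it while the qdisc is active, and release it from
 * ->destroy() with qdisc_put_rtab().
 */
static int example_attach_rtab(struct qdisc_rate_table **prtab,
                               struct tc_ratespec *rate,
                               struct nlattr *rtab_attr)
{
        struct qdisc_rate_table *rtab = qdisc_get_rtab(rate, rtab_attr);

        if (rtab == NULL)
                return -EINVAL;         /* bad ratespec or malformed table */
        *prtab = rtab;                  /* dropped later via qdisc_put_rtab() */
        return 0;
}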
279
280 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
281 {
282 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
283 timer);
284 struct net_device *dev = wd->qdisc->dev;
285
286 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
287 smp_wmb();
288 netif_schedule(dev);
289
290 return HRTIMER_NORESTART;
291 }
292
293 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
294 {
295 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
296 wd->timer.function = qdisc_watchdog;
297 wd->qdisc = qdisc;
298 }
299 EXPORT_SYMBOL(qdisc_watchdog_init);
300
301 void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
302 {
303 ktime_t time;
304
305 wd->qdisc->flags |= TCQ_F_THROTTLED;
306 time = ktime_set(0, 0);
307 time = ktime_add_ns(time, PSCHED_US2NS(expires));
308 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
309 }
310 EXPORT_SYMBOL(qdisc_watchdog_schedule);
311
312 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
313 {
314 hrtimer_cancel(&wd->timer);
315 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
316 }
317 EXPORT_SYMBOL(qdisc_watchdog_cancel);
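/*
 * The usual watchdog pattern, sketched under assumed names
 * (example_sched_data, next_time): a rate-limiting qdisc's dequeue arms
 * the watchdog for the next transmission time and returns NULL, which
 * marks the qdisc throttled rather than empty.
 */
struct example_sched_data {
        struct qdisc_watchdog   watchdog;       /* set up via qdisc_watchdog_init() */
        psched_time_t           next_time;
};

static struct sk_buff *example_throttled_dequeue(struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);

        if (psched_get_time() < q->next_time) {
                qdisc_watchdog_schedule(&q->watchdog, q->next_time);
                return NULL;            /* throttled, not empty */
        }
        return __skb_dequeue(&sch->q);
}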
318
319 /* Allocate a unique handle from the space managed by the kernel */
320
321 static u32 qdisc_alloc_handle(struct net_device *dev)
322 {
323 int i = 0x10000;
324 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
325
326 do {
327 autohandle += TC_H_MAKE(0x10000U, 0);
328 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
329 autohandle = TC_H_MAKE(0x80000000U, 0);
330 } while (qdisc_lookup(dev, autohandle) && --i > 0);
331
332 return i>0 ? autohandle : 0;
333 }
334
335 /* Attach a top-level qdisc to device dev */
336
337 static struct Qdisc *
338 dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
339 {
340 struct Qdisc *oqdisc;
341
342 if (dev->flags & IFF_UP)
343 dev_deactivate(dev);
344
345 qdisc_lock_tree(dev);
346 if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
347 oqdisc = dev->qdisc_ingress;
348 /* Prune old scheduler */
349 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
350 /* delete */
351 qdisc_reset(oqdisc);
352 dev->qdisc_ingress = NULL;
353 } else { /* new */
354 dev->qdisc_ingress = qdisc;
355 }
356
357 } else {
358
359 oqdisc = dev->qdisc_sleeping;
360
361 /* Prune old scheduler */
362 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
363 qdisc_reset(oqdisc);
364
365 /* ... and graft new one */
366 if (qdisc == NULL)
367 qdisc = &noop_qdisc;
368 dev->qdisc_sleeping = qdisc;
369 dev->qdisc = &noop_qdisc;
370 }
371
372 qdisc_unlock_tree(dev);
373
374 if (dev->flags & IFF_UP)
375 dev_activate(dev);
376
377 return oqdisc;
378 }
379
380 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
381 {
382 const struct Qdisc_class_ops *cops;
383 unsigned long cl;
384 u32 parentid;
385
386 if (n == 0)
387 return;
388 while ((parentid = sch->parent)) {
389 sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
390 if (sch == NULL) {
391 WARN_ON(parentid != TC_H_ROOT);
392 return;
393 }
394 cops = sch->ops->cl_ops;
395 if (cops->qlen_notify) {
396 cl = cops->get(sch, parentid);
397 cops->qlen_notify(sch, cl);
398 cops->put(sch, cl);
399 }
400 sch->q.qlen -= n;
401 }
402 }
403 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
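/*
 * Usage sketch: ancestors rely on accurate qlen counts, so a child that
 * drops an already-queued packet (e.g. from a timer rather than from
 * enqueue) is expected to do something like
 *
 *      sch->q.qlen--;
 *      qdisc_tree_decrease_qlen(sch, 1);
 *
 * so that every parent, up to the root, sees the decrease as well.
 */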
404
405 /* Graft qdisc "new" onto class "classid" of qdisc "parent", or
406 onto device "dev".
407
408 The old qdisc is not destroyed but is returned in *old.
409 */
410
411 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
412 u32 classid,
413 struct Qdisc *new, struct Qdisc **old)
414 {
415 int err = 0;
416 struct Qdisc *q = *old;
417
418
419 if (parent == NULL) {
420 if (q && q->flags&TCQ_F_INGRESS) {
421 *old = dev_graft_qdisc(dev, q);
422 } else {
423 *old = dev_graft_qdisc(dev, new);
424 }
425 } else {
426 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
427
428 err = -EINVAL;
429
430 if (cops) {
431 unsigned long cl = cops->get(parent, classid);
432 if (cl) {
433 err = cops->graft(parent, cl, new, old);
434 cops->put(parent, cl);
435 }
436 }
437 }
438 return err;
439 }
440
441 /*
442 Allocate and initialize a new qdisc.
443
444 Parameters are passed via opt.
445 */
446
447 static struct Qdisc *
448 qdisc_create(struct net_device *dev, u32 parent, u32 handle,
449 struct nlattr **tca, int *errp)
450 {
451 int err;
452 struct nlattr *kind = tca[TCA_KIND];
453 struct Qdisc *sch;
454 struct Qdisc_ops *ops;
455
456 ops = qdisc_lookup_ops(kind);
457 #ifdef CONFIG_KMOD
458 if (ops == NULL && kind != NULL) {
459 char name[IFNAMSIZ];
460 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
461 /* We dropped the RTNL semaphore in order to
462 * perform the module load. So, even if we
463 * succeeded in loading the module we have to
464 * tell the caller to replay the request. We
465 * indicate this using -EAGAIN.
466 * We replay the request because the device may
467 * go away in the meantime.
468 */
469 rtnl_unlock();
470 request_module("sch_%s", name);
471 rtnl_lock();
472 ops = qdisc_lookup_ops(kind);
473 if (ops != NULL) {
474 /* We will try qdisc_lookup_ops again,
475 * so don't keep a reference.
476 */
477 module_put(ops->owner);
478 err = -EAGAIN;
479 goto err_out;
480 }
481 }
482 }
483 #endif
484
485 err = -ENOENT;
486 if (ops == NULL)
487 goto err_out;
488
489 sch = qdisc_alloc(dev, ops);
490 if (IS_ERR(sch)) {
491 err = PTR_ERR(sch);
492 goto err_out2;
493 }
494
495 sch->parent = parent;
496
497 if (handle == TC_H_INGRESS) {
498 sch->flags |= TCQ_F_INGRESS;
499 sch->stats_lock = &dev->ingress_lock;
500 handle = TC_H_MAKE(TC_H_INGRESS, 0);
501 } else {
502 sch->stats_lock = &dev->queue_lock;
503 if (handle == 0) {
504 handle = qdisc_alloc_handle(dev);
505 err = -ENOMEM;
506 if (handle == 0)
507 goto err_out3;
508 }
509 }
510
511 sch->handle = handle;
512
513 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
514 if (tca[TCA_RATE]) {
515 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
516 sch->stats_lock,
517 tca[TCA_RATE]);
518 if (err) {
519 /*
520 * Any broken qdiscs that would require
521 * a ops->reset() here? The qdisc was never
522 * in action so it shouldn't be necessary.
523 */
524 if (ops->destroy)
525 ops->destroy(sch);
526 goto err_out3;
527 }
528 }
529 qdisc_lock_tree(dev);
530 list_add_tail(&sch->list, &dev->qdisc_list);
531 qdisc_unlock_tree(dev);
532
533 return sch;
534 }
535 err_out3:
536 dev_put(dev);
537 kfree((char *) sch - sch->padded);
538 err_out2:
539 module_put(ops->owner);
540 err_out:
541 *errp = err;
542 return NULL;
543 }
544
545 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
546 {
547 if (tca[TCA_OPTIONS]) {
548 int err;
549
550 if (sch->ops->change == NULL)
551 return -EINVAL;
552 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
553 if (err)
554 return err;
555 }
556 if (tca[TCA_RATE])
557 gen_replace_estimator(&sch->bstats, &sch->rate_est,
558 sch->stats_lock, tca[TCA_RATE]);
559 return 0;
560 }
561
562 struct check_loop_arg
563 {
564 struct qdisc_walker w;
565 struct Qdisc *p;
566 int depth;
567 };
568
569 static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
570
571 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
572 {
573 struct check_loop_arg arg;
574
575 if (q->ops->cl_ops == NULL)
576 return 0;
577
578 arg.w.stop = arg.w.skip = arg.w.count = 0;
579 arg.w.fn = check_loop_fn;
580 arg.depth = depth;
581 arg.p = p;
582 q->ops->cl_ops->walk(q, &arg.w);
583 return arg.w.stop ? -ELOOP : 0;
584 }
585
586 static int
587 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
588 {
589 struct Qdisc *leaf;
590 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
591 struct check_loop_arg *arg = (struct check_loop_arg *)w;
592
593 leaf = cops->leaf(q, cl);
594 if (leaf) {
595 if (leaf == arg->p || arg->depth > 7)
596 return -ELOOP;
597 return check_loop(leaf, arg->p, arg->depth + 1);
598 }
599 return 0;
600 }
601
602 /*
603 * Delete/get qdisc.
604 */
605
606 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
607 {
608 struct net *net = skb->sk->sk_net;
609 struct tcmsg *tcm = NLMSG_DATA(n);
610 struct nlattr *tca[TCA_MAX + 1];
611 struct net_device *dev;
612 u32 clid = tcm->tcm_parent;
613 struct Qdisc *q = NULL;
614 struct Qdisc *p = NULL;
615 int err;
616
617 if (net != &init_net)
618 return -EINVAL;
619
620 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
621 return -ENODEV;
622
623 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
624 if (err < 0)
625 return err;
626
627 if (clid) {
628 if (clid != TC_H_ROOT) {
629 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
630 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
631 return -ENOENT;
632 q = qdisc_leaf(p, clid);
633 } else { /* ingress */
634 q = dev->qdisc_ingress;
635 }
636 } else {
637 q = dev->qdisc_sleeping;
638 }
639 if (!q)
640 return -ENOENT;
641
642 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
643 return -EINVAL;
644 } else {
645 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
646 return -ENOENT;
647 }
648
649 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
650 return -EINVAL;
651
652 if (n->nlmsg_type == RTM_DELQDISC) {
653 if (!clid)
654 return -EINVAL;
655 if (q->handle == 0)
656 return -ENOENT;
657 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
658 return err;
659 if (q) {
660 qdisc_notify(skb, n, clid, q, NULL);
661 qdisc_lock_tree(dev);
662 qdisc_destroy(q);
663 qdisc_unlock_tree(dev);
664 }
665 } else {
666 qdisc_notify(skb, n, clid, NULL, q);
667 }
668 return 0;
669 }
670
671 /*
672 Create/change qdisc.
673 */
674
675 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
676 {
677 struct net *net = skb->sk->sk_net;
678 struct tcmsg *tcm;
679 struct nlattr *tca[TCA_MAX + 1];
680 struct net_device *dev;
681 u32 clid;
682 struct Qdisc *q, *p;
683 int err;
684
685 if (net != &init_net)
686 return -EINVAL;
687
688 replay:
689 /* Reinit, just in case something touches this. */
690 tcm = NLMSG_DATA(n);
691 clid = tcm->tcm_parent;
692 q = p = NULL;
693
694 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
695 return -ENODEV;
696
697 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
698 if (err < 0)
699 return err;
700
701 if (clid) {
702 if (clid != TC_H_ROOT) {
703 if (clid != TC_H_INGRESS) {
704 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
705 return -ENOENT;
706 q = qdisc_leaf(p, clid);
707 } else { /*ingress */
708 q = dev->qdisc_ingress;
709 }
710 } else {
711 q = dev->qdisc_sleeping;
712 }
713
714 /* It may be the default qdisc; ignore it */
715 if (q && q->handle == 0)
716 q = NULL;
717
718 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
719 if (tcm->tcm_handle) {
720 if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
721 return -EEXIST;
722 if (TC_H_MIN(tcm->tcm_handle))
723 return -EINVAL;
724 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
725 goto create_n_graft;
726 if (n->nlmsg_flags&NLM_F_EXCL)
727 return -EEXIST;
728 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
729 return -EINVAL;
730 if (q == p ||
731 (p && check_loop(q, p, 0)))
732 return -ELOOP;
733 atomic_inc(&q->refcnt);
734 goto graft;
735 } else {
736 if (q == NULL)
737 goto create_n_graft;
738
739 /* This magic test requires explanation.
740 *
741 * We know that some child q is already
742 * attached to this parent and we have a choice:
743 * either to change it or to create/graft a new one.
744 *
745 * 1. We are allowed to create/graft only
746 * if both the CREATE and REPLACE flags are set.
747 *
748 * 2. If EXCL is set, the requestor wanted to say
749 * that qdisc tcm_handle is not expected
750 * to exist, so we choose create/graft too.
751 *
752 * 3. The last case is when no flags are set.
753 * Alas, it is a sort of hole in the API; we
754 * cannot decide what to do unambiguously.
755 * For now we select create/graft if the
756 * user gave a KIND that does not match the existing one.
757 */
758 if ((n->nlmsg_flags&NLM_F_CREATE) &&
759 (n->nlmsg_flags&NLM_F_REPLACE) &&
760 ((n->nlmsg_flags&NLM_F_EXCL) ||
761 (tca[TCA_KIND] &&
762 nla_strcmp(tca[TCA_KIND], q->ops->id))))
763 goto create_n_graft;
764 }
765 }
766 } else {
767 if (!tcm->tcm_handle)
768 return -EINVAL;
769 q = qdisc_lookup(dev, tcm->tcm_handle);
770 }
771
772 /* Change qdisc parameters */
773 if (q == NULL)
774 return -ENOENT;
775 if (n->nlmsg_flags&NLM_F_EXCL)
776 return -EEXIST;
777 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
778 return -EINVAL;
779 err = qdisc_change(q, tca);
780 if (err == 0)
781 qdisc_notify(skb, n, clid, NULL, q);
782 return err;
783
784 create_n_graft:
785 if (!(n->nlmsg_flags&NLM_F_CREATE))
786 return -ENOENT;
787 if (clid == TC_H_INGRESS)
788 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent,
789 tca, &err);
790 else
791 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle,
792 tca, &err);
793 if (q == NULL) {
794 if (err == -EAGAIN)
795 goto replay;
796 return err;
797 }
798
799 graft:
800 if (1) {
801 struct Qdisc *old_q = NULL;
802 err = qdisc_graft(dev, p, clid, q, &old_q);
803 if (err) {
804 if (q) {
805 qdisc_lock_tree(dev);
806 qdisc_destroy(q);
807 qdisc_unlock_tree(dev);
808 }
809 return err;
810 }
811 qdisc_notify(skb, n, clid, old_q, q);
812 if (old_q) {
813 qdisc_lock_tree(dev);
814 qdisc_destroy(old_q);
815 qdisc_unlock_tree(dev);
816 }
817 }
818 return 0;
819 }
820
821 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
822 u32 pid, u32 seq, u16 flags, int event)
823 {
824 struct tcmsg *tcm;
825 struct nlmsghdr *nlh;
826 unsigned char *b = skb_tail_pointer(skb);
827 struct gnet_dump d;
828
829 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
830 tcm = NLMSG_DATA(nlh);
831 tcm->tcm_family = AF_UNSPEC;
832 tcm->tcm__pad1 = 0;
833 tcm->tcm__pad2 = 0;
834 tcm->tcm_ifindex = q->dev->ifindex;
835 tcm->tcm_parent = clid;
836 tcm->tcm_handle = q->handle;
837 tcm->tcm_info = atomic_read(&q->refcnt);
838 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
839 if (q->ops->dump && q->ops->dump(q, skb) < 0)
840 goto nla_put_failure;
841 q->qstats.qlen = q->q.qlen;
842
843 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
844 TCA_XSTATS, q->stats_lock, &d) < 0)
845 goto nla_put_failure;
846
847 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
848 goto nla_put_failure;
849
850 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
851 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
852 gnet_stats_copy_queue(&d, &q->qstats) < 0)
853 goto nla_put_failure;
854
855 if (gnet_stats_finish_copy(&d) < 0)
856 goto nla_put_failure;
857
858 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
859 return skb->len;
860
861 nlmsg_failure:
862 nla_put_failure:
863 nlmsg_trim(skb, b);
864 return -1;
865 }
866
867 static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
868 u32 clid, struct Qdisc *old, struct Qdisc *new)
869 {
870 struct sk_buff *skb;
871 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
872
873 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
874 if (!skb)
875 return -ENOBUFS;
876
877 if (old && old->handle) {
878 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
879 goto err_out;
880 }
881 if (new) {
882 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
883 goto err_out;
884 }
885
886 if (skb->len)
887 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
888
889 err_out:
890 kfree_skb(skb);
891 return -EINVAL;
892 }
893
894 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
895 {
896 struct net *net = skb->sk->sk_net;
897 int idx, q_idx;
898 int s_idx, s_q_idx;
899 struct net_device *dev;
900 struct Qdisc *q;
901
902 if (net != &init_net)
903 return 0;
904
905 s_idx = cb->args[0];
906 s_q_idx = q_idx = cb->args[1];
907 read_lock(&dev_base_lock);
908 idx = 0;
909 for_each_netdev(&init_net, dev) {
910 if (idx < s_idx)
911 goto cont;
912 if (idx > s_idx)
913 s_q_idx = 0;
914 q_idx = 0;
915 list_for_each_entry(q, &dev->qdisc_list, list) {
916 if (q_idx < s_q_idx) {
917 q_idx++;
918 continue;
919 }
920 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
921 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
922 goto done;
923 q_idx++;
924 }
925 cont:
926 idx++;
927 }
928
929 done:
930 read_unlock(&dev_base_lock);
931
932 cb->args[0] = idx;
933 cb->args[1] = q_idx;
934
935 return skb->len;
936 }
937
938
939
940 /************************************************
941 * Traffic classes manipulation. *
942 ************************************************/
943
944
945
946 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
947 {
948 struct net *net = skb->sk->sk_net;
949 struct tcmsg *tcm = NLMSG_DATA(n);
950 struct nlattr *tca[TCA_MAX + 1];
951 struct net_device *dev;
952 struct Qdisc *q = NULL;
953 const struct Qdisc_class_ops *cops;
954 unsigned long cl = 0;
955 unsigned long new_cl;
956 u32 pid = tcm->tcm_parent;
957 u32 clid = tcm->tcm_handle;
958 u32 qid = TC_H_MAJ(clid);
959 int err;
960
961 if (net != &init_net)
962 return -EINVAL;
963
964 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
965 return -ENODEV;
966
967 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
968 if (err < 0)
969 return err;
970
971 /*
972 parent == TC_H_UNSPEC - unspecified parent.
973 parent == TC_H_ROOT - class is the root, which has no parent.
974 parent == X:0 - parent is the root class.
975 parent == X:Y - parent is a node in the hierarchy.
976 parent == 0:Y - parent is X:Y, where X:0 is the qdisc.
977
978 handle == 0:0 - generate a handle from the kernel pool.
979 handle == 0:Y - class is X:Y, where X:0 is the qdisc.
980 handle == X:Y - fully specified.
981 handle == X:0 - the root class.
982 */
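	/* Worked example with hypothetical values: with qdisc 1:0 on the
	 * device, parent == 1:0 and handle == 0:10 name class 1:10;
	 * numerically TC_H_MAKE(0x10000, 0x10) == 0x10010, with
	 * TC_H_MAJ(0x10010) == 0x10000 and TC_H_MIN(0x10010) == 0x10.
	 */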
983
984 /* Step 1. Determine qdisc handle X:0 */
985
986 if (pid != TC_H_ROOT) {
987 u32 qid1 = TC_H_MAJ(pid);
988
989 if (qid && qid1) {
990 /* If both majors are known, they must be identical. */
991 if (qid != qid1)
992 return -EINVAL;
993 } else if (qid1) {
994 qid = qid1;
995 } else if (qid == 0)
996 qid = dev->qdisc_sleeping->handle;
997
998 /* Now qid is a genuine qdisc handle consistent
999 with both parent and child.
1000
1001 TC_H_MAJ(pid) may still be unspecified; complete it now.
1002 */
1003 if (pid)
1004 pid = TC_H_MAKE(qid, pid);
1005 } else {
1006 if (qid == 0)
1007 qid = dev->qdisc_sleeping->handle;
1008 }
1009
1010 /* OK. Locate qdisc */
1011 if ((q = qdisc_lookup(dev, qid)) == NULL)
1012 return -ENOENT;
1013
1014 /* And check that it supports classes */
1015 cops = q->ops->cl_ops;
1016 if (cops == NULL)
1017 return -EINVAL;
1018
1019 /* Now try to get class */
1020 if (clid == 0) {
1021 if (pid == TC_H_ROOT)
1022 clid = qid;
1023 } else
1024 clid = TC_H_MAKE(qid, clid);
1025
1026 if (clid)
1027 cl = cops->get(q, clid);
1028
1029 if (cl == 0) {
1030 err = -ENOENT;
1031 if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
1032 goto out;
1033 } else {
1034 switch (n->nlmsg_type) {
1035 case RTM_NEWTCLASS:
1036 err = -EEXIST;
1037 if (n->nlmsg_flags&NLM_F_EXCL)
1038 goto out;
1039 break;
1040 case RTM_DELTCLASS:
1041 err = cops->delete(q, cl);
1042 if (err == 0)
1043 tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
1044 goto out;
1045 case RTM_GETTCLASS:
1046 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
1047 goto out;
1048 default:
1049 err = -EINVAL;
1050 goto out;
1051 }
1052 }
1053
1054 new_cl = cl;
1055 err = cops->change(q, clid, pid, tca, &new_cl);
1056 if (err == 0)
1057 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
1058
1059 out:
1060 if (cl)
1061 cops->put(q, cl);
1062
1063 return err;
1064 }
1065
1066
1067 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1068 unsigned long cl,
1069 u32 pid, u32 seq, u16 flags, int event)
1070 {
1071 struct tcmsg *tcm;
1072 struct nlmsghdr *nlh;
1073 unsigned char *b = skb_tail_pointer(skb);
1074 struct gnet_dump d;
1075 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1076
1077 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1078 tcm = NLMSG_DATA(nlh);
1079 tcm->tcm_family = AF_UNSPEC;
1080 tcm->tcm_ifindex = q->dev->ifindex;
1081 tcm->tcm_parent = q->handle;
1082 tcm->tcm_handle = q->handle;
1083 tcm->tcm_info = 0;
1084 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1085 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1086 goto nla_put_failure;
1087
1088 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
1089 TCA_XSTATS, q->stats_lock, &d) < 0)
1090 goto nla_put_failure;
1091
1092 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1093 goto nla_put_failure;
1094
1095 if (gnet_stats_finish_copy(&d) < 0)
1096 goto nla_put_failure;
1097
1098 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1099 return skb->len;
1100
1101 nlmsg_failure:
1102 nla_put_failure:
1103 nlmsg_trim(skb, b);
1104 return -1;
1105 }
1106
1107 static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1108 struct Qdisc *q, unsigned long cl, int event)
1109 {
1110 struct sk_buff *skb;
1111 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1112
1113 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1114 if (!skb)
1115 return -ENOBUFS;
1116
1117 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1118 kfree_skb(skb);
1119 return -EINVAL;
1120 }
1121
1122 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1123 }
1124
1125 struct qdisc_dump_args
1126 {
1127 struct qdisc_walker w;
1128 struct sk_buff *skb;
1129 struct netlink_callback *cb;
1130 };
1131
1132 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1133 {
1134 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1135
1136 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1137 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1138 }
1139
1140 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1141 {
1142 struct net *net = skb->sk->sk_net;
1143 int t;
1144 int s_t;
1145 struct net_device *dev;
1146 struct Qdisc *q;
1147 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
1148 struct qdisc_dump_args arg;
1149
1150 if (net != &init_net)
1151 return 0;
1152
1153 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1154 return 0;
1155 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
1156 return 0;
1157
1158 s_t = cb->args[0];
1159 t = 0;
1160
1161 list_for_each_entry(q, &dev->qdisc_list, list) {
1162 if (t < s_t || !q->ops->cl_ops ||
1163 (tcm->tcm_parent &&
1164 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1165 t++;
1166 continue;
1167 }
1168 if (t > s_t)
1169 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1170 arg.w.fn = qdisc_class_dump;
1171 arg.skb = skb;
1172 arg.cb = cb;
1173 arg.w.stop = 0;
1174 arg.w.skip = cb->args[1];
1175 arg.w.count = 0;
1176 q->ops->cl_ops->walk(q, &arg.w);
1177 cb->args[1] = arg.w.count;
1178 if (arg.w.stop)
1179 break;
1180 t++;
1181 }
1182
1183 cb->args[0] = t;
1184
1185 dev_put(dev);
1186 return skb->len;
1187 }
1188
1189 /* Main classifier routine: scans the classifier chain attached
1190 to this qdisc, (optionally) tests for the protocol, and asks the
1191 specific classifiers.
1192 */
1193 int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
1194 struct tcf_result *res)
1195 {
1196 __be16 protocol = skb->protocol;
1197 int err = 0;
1198
1199 for (; tp; tp = tp->next) {
1200 if ((tp->protocol == protocol ||
1201 tp->protocol == htons(ETH_P_ALL)) &&
1202 (err = tp->classify(skb, tp, res)) >= 0) {
1203 #ifdef CONFIG_NET_CLS_ACT
1204 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1205 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1206 #endif
1207 return err;
1208 }
1209 }
1210 return -1;
1211 }
1212 EXPORT_SYMBOL(tc_classify_compat);
1213
1214 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
1215 struct tcf_result *res)
1216 {
1217 int err = 0;
1218 __be16 protocol;
1219 #ifdef CONFIG_NET_CLS_ACT
1220 struct tcf_proto *otp = tp;
1221 reclassify:
1222 #endif
1223 protocol = skb->protocol;
1224
1225 err = tc_classify_compat(skb, tp, res);
1226 #ifdef CONFIG_NET_CLS_ACT
1227 if (err == TC_ACT_RECLASSIFY) {
1228 u32 verd = G_TC_VERD(skb->tc_verd);
1229 tp = otp;
1230
1231 if (verd++ >= MAX_REC_LOOP) {
1232 printk("rule prio %u protocol %02x reclassify loop, "
1233 "packet dropped\n",
1234 tp->prio&0xffff, ntohs(tp->protocol));
1235 return TC_ACT_SHOT;
1236 }
1237 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1238 goto reclassify;
1239 }
1240 #endif
1241 return err;
1242 }
1243 EXPORT_SYMBOL(tc_classify);
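/*
 * A sketch of how a classful qdisc typically drives tc_classify() from
 * its enqueue path; filter_list and the band mapping are hypothetical,
 * and a real qdisc would also handle the TC_ACT_* verdicts returned
 * under CONFIG_NET_CLS_ACT.
 */
static unsigned int example_classify(struct sk_buff *skb,
                                     struct tcf_proto *filter_list)
{
        struct tcf_result res;

        if (tc_classify(skb, filter_list, &res) >= 0)
                return TC_H_MIN(res.classid);   /* matched: class minor */
        return 0;                               /* no match: default band */
}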
1244
1245 void tcf_destroy(struct tcf_proto *tp)
1246 {
1247 tp->ops->destroy(tp);
1248 module_put(tp->ops->owner);
1249 kfree(tp);
1250 }
1251
1252 void tcf_destroy_chain(struct tcf_proto *fl)
1253 {
1254 struct tcf_proto *tp;
1255
1256 while ((tp = fl) != NULL) {
1257 fl = tp->next;
1258 tcf_destroy(tp);
1259 }
1260 }
1261 EXPORT_SYMBOL(tcf_destroy_chain);
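/*
 * Usage sketch: a classful qdisc's ->destroy() typically tears down its
 * filters first, e.g. tcf_destroy_chain(q->filter_list), before freeing
 * per-class state (filter_list being the qdisc's own, hypothetical,
 * chain head).
 */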
1262
1263 #ifdef CONFIG_PROC_FS
1264 static int psched_show(struct seq_file *seq, void *v)
1265 {
1266 struct timespec ts;
1267
1268 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1269 seq_printf(seq, "%08x %08x %08x %08x\n",
1270 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
1271 1000000,
1272 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1273
1274 return 0;
1275 }
1276
1277 static int psched_open(struct inode *inode, struct file *file)
1278 {
1279 return single_open(file, psched_show, PDE(inode)->data);
1280 }
1281
1282 static const struct file_operations psched_fops = {
1283 .owner = THIS_MODULE,
1284 .open = psched_open,
1285 .read = seq_read,
1286 .llseek = seq_lseek,
1287 .release = single_release,
1288 };
1289 #endif
1290
1291 static int __init pktsched_init(void)
1292 {
1293 register_qdisc(&pfifo_qdisc_ops);
1294 register_qdisc(&bfifo_qdisc_ops);
1295 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1296
1297 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
1298 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
1299 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
1300 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
1301 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
1302 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);
1303
1304 return 0;
1305 }
1306
1307 subsys_initcall(pktsched_init);