/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   which is able to enqueue packets and to dequeue them (when the
   device is ready to send something) in the order and at the times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by a policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a previously dequeued packet. It is used by non-standard or
   simply buggy devices, which can defer output even when dev->tbusy == 0.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
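
/* Illustrative sketch only, not part of this file: a minimal queue-type
 * discipline obeying the enqueue/dequeue contract described above.  It
 * assumes the qdisc_enqueue_tail()/qdisc_dequeue_head()/qdisc_drop()
 * helpers from <net/sch_generic.h>; every "example_*" name is
 * hypothetical.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Tail-drop beyond an arbitrary limit.  Returning NET_XMIT_DROP
	 * tells the caller that this very packet was dropped. */
	if (skb_queue_len(&sch->q) < 128)
		return qdisc_enqueue_tail(skb, sch);	/* 0 == success */
	return qdisc_drop(skb, sch);			/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* Here sch->q is the real packet queue, so q.qlen stays valid
	 * and NULL really does mean "empty". */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.owner		= THIS_MODULE,
};
#endif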

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->requeue == NULL)
		qops->requeue = noop_qdisc_ops.requeue;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
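
/* Usage sketch (hypothetical module, for illustration): a scheduler
 * module typically registers its ops from its init hook and unregisters
 * them on exit, just as the built-in fifos do in pktsched_init() below.
 * "example_qdisc_ops" is the hypothetical ops table sketched earlier.
 */
#if 0
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif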

/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (root qdisc, all its children, children of children, etc.).
 */

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	list_for_each_entry(q, &dev->qdisc_list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (rtattr_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, RTA_DATA(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
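
/* How a shaper consumes the table (a sketch modeled on sch_tbf.c, not
 * an API defined here): each of the 256 u32 slots gives the time to
 * transmit a packet whose length falls into that cell, indexed by
 * len >> cell_log.  The 1024-byte payload check above is exactly
 * 256 * sizeof(u32).
 */
#if 0
static inline u32 example_l2t(struct qdisc_rate_table *rtab,
			      unsigned int len)
{
	return rtab->data[len >> rtab->rate.cell_log];
}
#endif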

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);
	struct net_device *dev = wd->qdisc->dev;

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	if (spin_trylock(&dev->queue_lock)) {
		qdisc_run(dev);
		spin_unlock(&dev->queue_lock);
	} else
		netif_schedule(dev);

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
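
/* Usage sketch (hypothetical, modeled on the token bucket filter): a
 * rate-limiting discipline arms the watchdog from dequeue() when the
 * head packet is not yet due and returns NULL; the hrtimer above then
 * clears TCQ_F_THROTTLED and reschedules the device.  The private
 * struct and "example_*" names are assumptions.
 */
#if 0
struct example_shaper_data {
	struct qdisc_watchdog	watchdog;
	psched_time_t		next_xmit;
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);
	psched_time_t now;

	if (!sch->q.qlen)
		return NULL;

	PSCHED_GET_TIME(now);
	if (now < q->next_xmit) {
		qdisc_watchdog_schedule(&q->watchdog, q->next_xmit);
		return NULL;	/* throttled, not empty */
	}
	return qdisc_dequeue_head(sch);
}
#endif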

/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
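
/* Handle layout refresher (macros from <linux/pkt_sched.h>): the upper
 * 16 bits of a handle are the qdisc major, the lower 16 the class
 * minor, so "1:2" in tc syntax is 0x00010002.  A small illustration:
 */
#if 0
static inline void example_handle_layout(void)
{
	u32 h = TC_H_MAKE(1 << 16, 2);	/* "1:2" == 0x00010002 */

	BUG_ON(TC_H_MAJ(h) != 0x00010000);
	BUG_ON(TC_H_MIN(h) != 2);
}
#endif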

/* Attach toplevel qdisc to device dev */

static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	qdisc_lock_tree(dev);
	if (qdisc && qdisc->flags & TCQ_F_INGRESS) {
		oqdisc = dev->qdisc_ingress;
		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
			/* delete */
			qdisc_reset(oqdisc);
			dev->qdisc_ingress = NULL;
		} else {	/* new */
			dev->qdisc_ingress = qdisc;
		}

	} else {

		oqdisc = dev->qdisc_sleeping;

		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
			qdisc_reset(oqdisc);

		/* ... and graft new one */
		if (qdisc == NULL)
			qdisc = &noop_qdisc;
		dev->qdisc_sleeping = qdisc;
		dev->qdisc = &noop_qdisc;
	}

	qdisc_unlock_tree(dev);

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return oqdisc;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   The old qdisc is not destroyed but returned in *old.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       u32 classid,
		       struct Qdisc *new, struct Qdisc **old)
{
	int err = 0;
	struct Qdisc *q = *old;


	if (parent == NULL) {
		if (q && q->flags & TCQ_F_INGRESS) {
			*old = dev_graft_qdisc(dev, q);
		} else {
			*old = dev_graft_qdisc(dev, new);
		}
	} else {
		struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EINVAL;

		if (cops) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, old);
				if (new)
					new->parent = classid;
				cops->put(parent, cl);
			}
		}
	}
	return err;
}
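
/* Sketch of the class-level hook that cops->graft dispatches to above
 * (modeled loosely on sch_prio.c; the "example_*" names and fixed band
 * array are assumptions).  The parent swaps the child pointer under
 * the tree lock and hands the old child back through *old.
 */
#if 0
struct example_sched_data {
	u32		bands;
	struct Qdisc	*queues[16];
};

static int example_graft(struct Qdisc *sch, unsigned long arg,
			 struct Qdisc *new, struct Qdisc **old)
{
	struct example_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return -EINVAL;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
#endif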

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
{
	int err;
	struct rtattr *kind = tca[TCA_KIND-1];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		sch->stats_lock = &dev->ingress_lock;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		sch->stats_lock = &dev->queue_lock;
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
		if (tca[TCA_RATE-1]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						sch->stats_lock,
						tca[TCA_RATE-1]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here?  The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
		qdisc_lock_tree(dev);
		list_add_tail(&sch->list, &dev->qdisc_list);
		qdisc_unlock_tree(dev);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}

static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
{
	if (tca[TCA_OPTIONS-1]) {
		int err;

		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS-1]);
		if (err)
			return err;
	}
	if (tca[TCA_RATE-1])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      sch->stats_lock, tca[TCA_RATE-1]);
	return 0;
}

struct check_loop_arg
{
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct rtattr **tca = arg;
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->qdisc_ingress;
			}
		} else {
			q = dev->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
			return err;
		if (q) {
			qdisc_notify(skb, n, clid, q, NULL);
			qdisc_lock_tree(dev);
			qdisc_destroy(q);
			qdisc_unlock_tree(dev);
		}
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}

/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct tcmsg *tcm;
	struct rtattr **tca;
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	tca = arg;
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->qdisc_ingress;
			}
		} else {
			q = dev->qdisc_sleeping;
		}

		/* It may be the default qdisc; ignore it. */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and have a choice:
				 * either to change it or to create/graft a
				 * new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requestor meant
				 * that a qdisc with handle tcm_handle is
				 * not expected to exist, so we choose
				 * create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of a hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND which does not match the
				 * existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND-1] &&
				      rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
	else
		q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	if (1) {
		struct Qdisc *old_q = NULL;
		err = qdisc_graft(dev, p, clid, q, &old_q);
		if (err) {
			if (q) {
				qdisc_lock_tree(dev);
				qdisc_destroy(q);
				qdisc_unlock_tree(dev);
			}
			return err;
		}
		qdisc_notify(skb, n, clid, old_q, q);
		if (old_q) {
			qdisc_lock_tree(dev);
			qdisc_destroy(old_q);
			qdisc_unlock_tree(dev);
		}
	}
	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = q->dev->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto rtattr_failure;
	q->qstats.qlen = q->q.qlen;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, q->stats_lock, &d) < 0)
		goto rtattr_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto rtattr_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto rtattr_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto rtattr_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			u32 clid, struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	struct Qdisc *q;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(dev) {
		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;
		list_for_each_entry(q, &dev->qdisc_list, list) {
			if (q_idx < s_q_idx) {
				q_idx++;
				continue;
			}
			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
				goto done;
			q_idx++;
		}
cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}


/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/


static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct rtattr **tca = arg;
	struct net_device *dev;
	struct Qdisc *q = NULL;
	struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc_sleeping->handle;

		/* Now qid is the genuine qdisc handle, consistent with
		   both parent and child.

		   TC_H_MAJ(pid) may still be unspecified; complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev->qdisc_sleeping->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = q->dev->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto rtattr_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, q->stats_lock, &d) < 0)
		goto rtattr_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto rtattr_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto rtattr_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args
{
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct qdisc_dump_args arg;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	list_for_each_entry(q, &dev->qdisc_list, list) {
		if (t < s_t || !q->ops->cl_ops ||
		    (tcm->tcm_parent &&
		     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
			t++;
			continue;
		}
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
		arg.w.fn = qdisc_class_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1];
		arg.w.count = 0;
		q->ops->cl_ops->walk(q, &arg.w);
		cb->args[1] = arg.w.count;
		if (arg.w.stop)
			break;
		t++;
	}

	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for protocol, and asks the
   specific classifiers.
 */
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol = skb->protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	for ( ; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err == TC_ACT_RECLASSIFY) {
				__u32 verd = (__u32)G_TC_VERD(skb->tc_verd);
				tp = otp;

				if (MAX_REC_LOOP < verd++) {
					printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
					       tp->prio & 0xffff, ntohs(tp->protocol));
					return TC_ACT_SHOT;
				}
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
				goto reclassify;
			} else {
				if (skb->tc_verd)
					skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
				return err;
			}
#else

			return err;
#endif
		}

	}
	return -1;
}
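
/* Usage sketch (hypothetical, modeled on how sch_prio.c picks a band):
 * a classful qdisc runs its filter chain through tc_classify() and
 * falls back to a default band when nothing matches.  The private
 * struct and "example_*" names are assumptions.
 */
#if 0
struct example_cls_data {
	u32			bands;
	struct tcf_proto	*filter_list;
	struct Qdisc		*queues[16];
};

static struct Qdisc *example_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_cls_data *q = qdisc_priv(sch);
	struct tcf_result res;
	u32 band;

	if (tc_classify(skb, q->filter_list, &res) >= 0) {
		band = TC_H_MIN(res.classid);
		if (band >= 1 && band <= q->bands)
			return q->queues[band - 1];
	}
	return q->queues[0];	/* no filter matched: default band */
}
#endif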

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto *fl)
{
	struct tcf_proto *tp;

	while ((tp = fl) != NULL) {
		fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / (u32)ktime_to_ns(KTIME_MONOTONIC_RES));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
	.owner		= THIS_MODULE,
	.open		= psched_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create("psched", 0, &psched_fops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);

EXPORT_SYMBOL(qdisc_get_rtab);
EXPORT_SYMBOL(qdisc_put_rtab);
EXPORT_SYMBOL(register_qdisc);
EXPORT_SYMBOL(unregister_qdisc);
EXPORT_SYMBOL(tc_classify);