1 /*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30
31 #include <net/net_namespace.h>
32 #include <net/sock.h>
33 #include <net/netlink.h>
34 #include <net/pkt_sched.h>
35
36 static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
37 struct Qdisc *old, struct Qdisc *new);
38 static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
39 struct Qdisc *q, unsigned long cl, int event);
40
41 /*
42
43 Short review.
44 -------------
45
46 This file consists of two interrelated parts:
47
48 1. queueing disciplines manager frontend.
49 2. traffic classes manager frontend.
50
51   Generally, a queueing discipline ("qdisc") is a black box,
52   which is able to enqueue packets and to dequeue them (when
53   the device is ready to send something) in an order and at times
54   determined by the algorithm hidden inside it.
55
56   qdiscs are divided into two categories:
57   - "queues", which have no internal structure visible from the outside.
58   - "schedulers", which split all packets into "traffic classes",
59     using "packet classifiers" (see cls_api.c)
60
61   In turn, classes may have child qdiscs (as a rule, queues)
62   attached to them, etc. etc. etc.
63
64   The goal of the routines in this file is to translate
65   the information supplied by the user in the form of handles
66   into a form more intelligible to the kernel, to perform sanity
67   checks and the parts of the work that are common to all qdiscs,
68   and to provide rtnetlink notifications.
69
70 All real intelligent work is done inside qdisc modules.
71
72
73
74 Every discipline has two major routines: enqueue and dequeue.
75
76 ---dequeue
77
78   dequeue usually returns an skb to send. It is allowed to return NULL,
79   but that does not mean the queue is empty; it only means that the
80   discipline does not want to send anything at this time.
81   The queue is really empty only if q->q.qlen == 0.
82   For complicated disciplines with multiple queues, q->q is not the
83   real packet queue, but q->q.qlen must still be valid.
84
85 ---enqueue
86
87   enqueue returns 0 if the packet was enqueued successfully.
88   If a packet (this one or another one) was dropped, it returns
89   a non-zero error code.
90   NET_XMIT_DROP 	- this packet was dropped.
91     Expected action: do not back off, but wait until the queue clears.
92   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
93     Expected action: back off or ignore.
94   NET_XMIT_POLICED	- dropped by the policer.
95     Expected action: back off or report an error to real-time applications.
96
97 Auxiliary routines:
98
99 ---requeue
100
101   requeues a packet that has been dequeued once. It is used for non-standard
102   or just buggy devices, which can defer output even if netif_queue_stopped()=0.
103
104 ---reset
105
106   returns the qdisc to its initial state: purges all buffers, clears all
107   timers, counters (except for statistics), etc.
108
109 ---init
110
111 initializes newly created qdisc.
112
113 ---destroy
114
115   destroys resources allocated by init and during the lifetime of the qdisc.
116
117 ---change
118
119 changes qdisc parameters.
120 */
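/*
 * Editor's illustration (not part of the original file): a hedged sketch of
 * a minimal "queue"-type discipline that follows the enqueue/dequeue
 * contract described above, loosely modelled on sch_fifo.c.  The names
 * example_enqueue/example_ops and the fixed limit of 128 are hypothetical.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (skb_queue_len(&sch->q) < 128)
 *			return qdisc_enqueue_tail(skb, sch);	(NET_XMIT_SUCCESS, q.qlen++)
 *
 *		return qdisc_reshape_fail(skb, sch);		(usually NET_XMIT_DROP)
 *	}
 *
 *	static struct Qdisc_ops example_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,		(keeps q.qlen valid)
 *		.requeue	= qdisc_requeue,
 *		.drop		= qdisc_queue_drop,
 *		.owner		= THIS_MODULE,
 *	};
 */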
121
122 /* Protects the list of registered TC modules. It is a pure SMP lock. */
123 static DEFINE_RWLOCK(qdisc_mod_lock);
124
125
126 /************************************************
127 * Queueing disciplines manipulation. *
128 ************************************************/
129
130
131 /* The list of all installed queueing disciplines. */
132
133 static struct Qdisc_ops *qdisc_base;
134
135 /* Register/unregister queueing discipline */
136
137 int register_qdisc(struct Qdisc_ops *qops)
138 {
139 struct Qdisc_ops *q, **qp;
140 int rc = -EEXIST;
141
142 write_lock(&qdisc_mod_lock);
143 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
144 if (!strcmp(qops->id, q->id))
145 goto out;
146
147 if (qops->enqueue == NULL)
148 qops->enqueue = noop_qdisc_ops.enqueue;
149 if (qops->requeue == NULL)
150 qops->requeue = noop_qdisc_ops.requeue;
151 if (qops->dequeue == NULL)
152 qops->dequeue = noop_qdisc_ops.dequeue;
153
154 qops->next = NULL;
155 *qp = qops;
156 rc = 0;
157 out:
158 write_unlock(&qdisc_mod_lock);
159 return rc;
160 }
161 EXPORT_SYMBOL(register_qdisc);
162
163 int unregister_qdisc(struct Qdisc_ops *qops)
164 {
165 struct Qdisc_ops *q, **qp;
166 int err = -ENOENT;
167
168 write_lock(&qdisc_mod_lock);
169 for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
170 if (q == qops)
171 break;
172 if (q) {
173 *qp = q->next;
174 q->next = NULL;
175 err = 0;
176 }
177 write_unlock(&qdisc_mod_lock);
178 return err;
179 }
180 EXPORT_SYMBOL(unregister_qdisc);
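/*
 * Editor's illustration (not part of the original file): the usual way a
 * scheduler module pairs register_qdisc()/unregister_qdisc() with its
 * module init/exit hooks, as in-tree schedulers such as sch_tbf.c do.
 * "example_ops" is the hypothetical Qdisc_ops sketched earlier.
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_qdisc(&example_ops);
 *	}
 *
 *	module_init(example_module_init)
 *	module_exit(example_module_exit)
 *	MODULE_LICENSE("GPL");
 */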
181
182 /* We know the handle. Find the qdisc among all qdiscs attached to the device
183    (root qdisc, all its children, children of children, etc.)
184  */
185
186 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
187 {
188 struct netdev_queue *dev_queue = &dev->tx_queue;
189 struct Qdisc *q;
190
191 list_for_each_entry(q, &dev_queue->qdisc_list, list) {
192 if (q->handle == handle)
193 return q;
194 }
195 return NULL;
196 }
197
198 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
199 {
200 unsigned long cl;
201 struct Qdisc *leaf;
202 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
203
204 if (cops == NULL)
205 return NULL;
206 cl = cops->get(p, classid);
207
208 if (cl == 0)
209 return NULL;
210 leaf = cops->leaf(p, cl);
211 cops->put(p, cl);
212 return leaf;
213 }
214
215 /* Find queueing discipline by name */
216
217 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
218 {
219 struct Qdisc_ops *q = NULL;
220
221 if (kind) {
222 read_lock(&qdisc_mod_lock);
223 for (q = qdisc_base; q; q = q->next) {
224 if (nla_strcmp(kind, q->id) == 0) {
225 if (!try_module_get(q->owner))
226 q = NULL;
227 break;
228 }
229 }
230 read_unlock(&qdisc_mod_lock);
231 }
232 return q;
233 }
234
235 static struct qdisc_rate_table *qdisc_rtab_list;
236
237 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
238 {
239 struct qdisc_rate_table *rtab;
240
241 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
242 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
243 rtab->refcnt++;
244 return rtab;
245 }
246 }
247
248 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
249 nla_len(tab) != TC_RTAB_SIZE)
250 return NULL;
251
252 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
253 if (rtab) {
254 rtab->rate = *r;
255 rtab->refcnt = 1;
256 memcpy(rtab->data, nla_data(tab), 1024);
257 rtab->next = qdisc_rtab_list;
258 qdisc_rtab_list = rtab;
259 }
260 return rtab;
261 }
262 EXPORT_SYMBOL(qdisc_get_rtab);
263
264 void qdisc_put_rtab(struct qdisc_rate_table *tab)
265 {
266 struct qdisc_rate_table *rtab, **rtabp;
267
268 if (!tab || --tab->refcnt)
269 return;
270
271 for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
272 if (rtab == tab) {
273 *rtabp = rtab->next;
274 kfree(rtab);
275 return;
276 }
277 }
278 }
279 EXPORT_SYMBOL(qdisc_put_rtab);
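/*
 * Editor's illustration (not part of the original file): a hedged sketch of
 * how a rate-based qdisc typically uses the rate-table cache above, roughly
 * following sch_tbf.c.  The attribute name TCA_EXAMPLE_RTAB, "qopt" and the
 * private field "q->rtab" are hypothetical.
 *
 *	In ->change():
 *		struct qdisc_rate_table *rtab;
 *
 *		rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_EXAMPLE_RTAB]);
 *		if (rtab == NULL)
 *			return -EINVAL;
 *		... swap it into the private data, putting any old table ...
 *
 *	In ->destroy():
 *		qdisc_put_rtab(q->rtab);	(freed once the refcount drops to 0)
 */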
280
281 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
282 {
283 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
284 timer);
285 struct netdev_queue *txq = wd->qdisc->dev_queue;
286
287 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
288 smp_wmb();
289 netif_schedule_queue(txq);
290
291 return HRTIMER_NORESTART;
292 }
293
294 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
295 {
296 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
297 wd->timer.function = qdisc_watchdog;
298 wd->qdisc = qdisc;
299 }
300 EXPORT_SYMBOL(qdisc_watchdog_init);
301
302 void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
303 {
304 ktime_t time;
305
306 wd->qdisc->flags |= TCQ_F_THROTTLED;
307 time = ktime_set(0, 0);
308 time = ktime_add_ns(time, PSCHED_US2NS(expires));
309 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
310 }
311 EXPORT_SYMBOL(qdisc_watchdog_schedule);
312
313 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
314 {
315 hrtimer_cancel(&wd->timer);
316 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
317 }
318 EXPORT_SYMBOL(qdisc_watchdog_cancel);
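/*
 * Editor's illustration (not part of the original file): the typical life
 * cycle of the watchdog helpers above inside a shaping qdisc (compare
 * sch_tbf.c); "q->watchdog" assumes a qdisc_watchdog member in the
 * scheduler's private data.
 *
 *	In ->init():	qdisc_watchdog_init(&q->watchdog, sch);
 *	In ->dequeue():	when no packet may be sent yet,
 *			qdisc_watchdog_schedule(&q->watchdog, next_xmit_time);
 *			and return NULL; the hrtimer clears TCQ_F_THROTTLED and
 *			reschedules the queue when next_xmit_time arrives.
 *	In ->reset() / ->destroy():
 *			qdisc_watchdog_cancel(&q->watchdog);
 */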
319
320 struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
321 {
322 unsigned int size = n * sizeof(struct hlist_head), i;
323 struct hlist_head *h;
324
325 if (size <= PAGE_SIZE)
326 h = kmalloc(size, GFP_KERNEL);
327 else
328 h = (struct hlist_head *)
329 __get_free_pages(GFP_KERNEL, get_order(size));
330
331 if (h != NULL) {
332 for (i = 0; i < n; i++)
333 INIT_HLIST_HEAD(&h[i]);
334 }
335 return h;
336 }
337
338 static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
339 {
340 unsigned int size = n * sizeof(struct hlist_head);
341
342 if (size <= PAGE_SIZE)
343 kfree(h);
344 else
345 free_pages((unsigned long)h, get_order(size));
346 }
347
348 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
349 {
350 struct Qdisc_class_common *cl;
351 struct hlist_node *n, *next;
352 struct hlist_head *nhash, *ohash;
353 unsigned int nsize, nmask, osize;
354 unsigned int i, h;
355
356 /* Rehash when load factor exceeds 0.75 */
357 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
358 return;
359 nsize = clhash->hashsize * 2;
360 nmask = nsize - 1;
361 nhash = qdisc_class_hash_alloc(nsize);
362 if (nhash == NULL)
363 return;
364
365 ohash = clhash->hash;
366 osize = clhash->hashsize;
367
368 sch_tree_lock(sch);
369 for (i = 0; i < osize; i++) {
370 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
371 h = qdisc_class_hash(cl->classid, nmask);
372 hlist_add_head(&cl->hnode, &nhash[h]);
373 }
374 }
375 clhash->hash = nhash;
376 clhash->hashsize = nsize;
377 clhash->hashmask = nmask;
378 sch_tree_unlock(sch);
379
380 qdisc_class_hash_free(ohash, osize);
381 }
382 EXPORT_SYMBOL(qdisc_class_hash_grow);
383
384 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
385 {
386 unsigned int size = 4;
387
388 clhash->hash = qdisc_class_hash_alloc(size);
389 if (clhash->hash == NULL)
390 return -ENOMEM;
391 clhash->hashsize = size;
392 clhash->hashmask = size - 1;
393 clhash->hashelems = 0;
394 return 0;
395 }
396 EXPORT_SYMBOL(qdisc_class_hash_init);
397
398 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
399 {
400 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
401 }
402 EXPORT_SYMBOL(qdisc_class_hash_destroy);
403
404 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
405 struct Qdisc_class_common *cl)
406 {
407 unsigned int h;
408
409 INIT_HLIST_NODE(&cl->hnode);
410 h = qdisc_class_hash(cl->classid, clhash->hashmask);
411 hlist_add_head(&cl->hnode, &clhash->hash[h]);
412 clhash->hashelems++;
413 }
414 EXPORT_SYMBOL(qdisc_class_hash_insert);
415
416 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
417 struct Qdisc_class_common *cl)
418 {
419 hlist_del(&cl->hnode);
420 clhash->hashelems--;
421 }
422 EXPORT_SYMBOL(qdisc_class_hash_remove);
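/*
 * Editor's illustration (not part of the original file): how a classful
 * qdisc is expected to drive the class hash helpers above (compare the
 * users in sch_htb.c and sch_cbq.c).  A class structure embedding a
 * struct Qdisc_class_common named "common" is assumed.
 *
 *	In ->init():	qdisc_class_hash_init(&q->clhash);
 *	When a class is created:
 *			cl->common.classid = classid;
 *			qdisc_class_hash_insert(&q->clhash, &cl->common);
 *			qdisc_class_hash_grow(sch, &q->clhash);
 *	Lookup:		struct Qdisc_class_common *c =
 *				qdisc_class_find(&q->clhash, classid);
 *	In ->delete():	qdisc_class_hash_remove(&q->clhash, &cl->common);
 *	In ->destroy():	qdisc_class_hash_destroy(&q->clhash);
 */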
423
424 /* Allocate a unique handle from the space managed by the kernel */
425
426 static u32 qdisc_alloc_handle(struct net_device *dev)
427 {
428 int i = 0x10000;
429 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
430
431 do {
432 autohandle += TC_H_MAKE(0x10000U, 0);
433 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
434 autohandle = TC_H_MAKE(0x80000000U, 0);
435 } while (qdisc_lookup(dev, autohandle) && --i > 0);
436
437 return i>0 ? autohandle : 0;
438 }
439
440 /* Attach toplevel qdisc to device dev */
441
442 static struct Qdisc *
443 dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
444 {
445 struct netdev_queue *dev_queue;
446 struct Qdisc *oqdisc;
447
448 if (dev->flags & IFF_UP)
449 dev_deactivate(dev);
450
451 qdisc_lock_tree(dev);
452 if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
453 dev_queue = &dev->rx_queue;
454 oqdisc = dev_queue->qdisc;
455 /* Prune old scheduler */
456 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
457 /* delete */
458 qdisc_reset(oqdisc);
459 dev_queue->qdisc = NULL;
460 } else { /* new */
461 dev_queue->qdisc = qdisc;
462 }
463
464 } else {
465 dev_queue = &dev->tx_queue;
466 oqdisc = dev_queue->qdisc_sleeping;
467
468 /* Prune old scheduler */
469 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
470 qdisc_reset(oqdisc);
471
472 /* ... and graft new one */
473 if (qdisc == NULL)
474 qdisc = &noop_qdisc;
475 dev_queue->qdisc_sleeping = qdisc;
476 dev_queue->qdisc = &noop_qdisc;
477 }
478
479 qdisc_unlock_tree(dev);
480
481 if (dev->flags & IFF_UP)
482 dev_activate(dev);
483
484 return oqdisc;
485 }
486
487 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
488 {
489 const struct Qdisc_class_ops *cops;
490 unsigned long cl;
491 u32 parentid;
492
493 if (n == 0)
494 return;
495 while ((parentid = sch->parent)) {
496 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
497 return;
498
499 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
500 if (sch == NULL) {
501 WARN_ON(parentid != TC_H_ROOT);
502 return;
503 }
504 cops = sch->ops->cl_ops;
505 if (cops->qlen_notify) {
506 cl = cops->get(sch, parentid);
507 cops->qlen_notify(sch, cl);
508 cops->put(sch, cl);
509 }
510 sch->q.qlen -= n;
511 }
512 }
513 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
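/*
 * Editor's illustration (not part of the original file): a ->graft()-style
 * caller of qdisc_tree_decrease_qlen(), as found in schedulers such as
 * sch_tbf.c of this era; "q->qdisc" assumes a single child qdisc kept in
 * the private data.
 *
 *	sch_tree_lock(sch);
 *	*old = xchg(&q->qdisc, new);
 *	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 *	qdisc_reset(*old);
 *	sch_tree_unlock(sch);
 */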
514
515 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
516 to device "dev".
517
518 Old qdisc is not destroyed but returned in *old.
519 */
520
521 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
522 u32 classid,
523 struct Qdisc *new, struct Qdisc **old)
524 {
525 int err = 0;
526 struct Qdisc *q = *old;
527
528
529 if (parent == NULL) {
530 if (q && q->flags&TCQ_F_INGRESS) {
531 *old = dev_graft_qdisc(dev, q);
532 } else {
533 *old = dev_graft_qdisc(dev, new);
534 }
535 } else {
536 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
537
538 err = -EINVAL;
539
540 if (cops) {
541 unsigned long cl = cops->get(parent, classid);
542 if (cl) {
543 err = cops->graft(parent, cl, new, old);
544 cops->put(parent, cl);
545 }
546 }
547 }
548 return err;
549 }
550
551 /*
552 Allocate and initialize new qdisc.
553
554 Parameters are passed via opt.
555 */
556
557 static struct Qdisc *
558 qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
559 u32 parent, u32 handle, struct nlattr **tca, int *errp)
560 {
561 int err;
562 struct nlattr *kind = tca[TCA_KIND];
563 struct Qdisc *sch;
564 struct Qdisc_ops *ops;
565
566 ops = qdisc_lookup_ops(kind);
567 #ifdef CONFIG_KMOD
568 if (ops == NULL && kind != NULL) {
569 char name[IFNAMSIZ];
570 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
571 /* We dropped the RTNL semaphore in order to
572 * perform the module load. So, even if we
573 * succeeded in loading the module we have to
574 * tell the caller to replay the request. We
575 * indicate this using -EAGAIN.
576 * We replay the request because the device may
577 * go away in the meantime.
578 */
579 rtnl_unlock();
580 request_module("sch_%s", name);
581 rtnl_lock();
582 ops = qdisc_lookup_ops(kind);
583 if (ops != NULL) {
584 /* The replayed request will call qdisc_lookup_ops()
585 * again, so don't keep a reference here.
586 */
587 module_put(ops->owner);
588 err = -EAGAIN;
589 goto err_out;
590 }
591 }
592 }
593 #endif
594
595 err = -ENOENT;
596 if (ops == NULL)
597 goto err_out;
598
599 sch = qdisc_alloc(dev_queue, ops);
600 if (IS_ERR(sch)) {
601 err = PTR_ERR(sch);
602 goto err_out2;
603 }
604
605 sch->parent = parent;
606
607 if (handle == TC_H_INGRESS) {
608 sch->flags |= TCQ_F_INGRESS;
609 handle = TC_H_MAKE(TC_H_INGRESS, 0);
610 } else {
611 if (handle == 0) {
612 handle = qdisc_alloc_handle(dev);
613 err = -ENOMEM;
614 if (handle == 0)
615 goto err_out3;
616 }
617 }
618
619 sch->handle = handle;
620
621 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
622 if (tca[TCA_RATE]) {
623 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
624 &sch->dev_queue->lock,
625 tca[TCA_RATE]);
626 if (err) {
627 /*
628 * Any broken qdiscs that would require
629 * a ops->reset() here? The qdisc was never
630 * in action so it shouldn't be necessary.
631 */
632 if (ops->destroy)
633 ops->destroy(sch);
634 goto err_out3;
635 }
636 }
637 qdisc_lock_tree(dev);
638 list_add_tail(&sch->list, &dev_queue->qdisc_list);
639 qdisc_unlock_tree(dev);
640
641 return sch;
642 }
643 err_out3:
644 dev_put(dev);
645 kfree((char *) sch - sch->padded);
646 err_out2:
647 module_put(ops->owner);
648 err_out:
649 *errp = err;
650 return NULL;
651 }
652
653 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
654 {
655 if (tca[TCA_OPTIONS]) {
656 int err;
657
658 if (sch->ops->change == NULL)
659 return -EINVAL;
660 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
661 if (err)
662 return err;
663 }
664 if (tca[TCA_RATE])
665 gen_replace_estimator(&sch->bstats, &sch->rate_est,
666 &sch->dev_queue->lock, tca[TCA_RATE]);
667 return 0;
668 }
669
670 struct check_loop_arg
671 {
672 struct qdisc_walker w;
673 struct Qdisc *p;
674 int depth;
675 };
676
677 static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
678
679 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
680 {
681 struct check_loop_arg arg;
682
683 if (q->ops->cl_ops == NULL)
684 return 0;
685
686 arg.w.stop = arg.w.skip = arg.w.count = 0;
687 arg.w.fn = check_loop_fn;
688 arg.depth = depth;
689 arg.p = p;
690 q->ops->cl_ops->walk(q, &arg.w);
691 return arg.w.stop ? -ELOOP : 0;
692 }
693
694 static int
695 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
696 {
697 struct Qdisc *leaf;
698 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
699 struct check_loop_arg *arg = (struct check_loop_arg *)w;
700
701 leaf = cops->leaf(q, cl);
702 if (leaf) {
703 if (leaf == arg->p || arg->depth > 7)
704 return -ELOOP;
705 return check_loop(leaf, arg->p, arg->depth + 1);
706 }
707 return 0;
708 }
709
710 /*
711 * Delete/get qdisc.
712 */
713
714 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
715 {
716 struct net *net = sock_net(skb->sk);
717 struct tcmsg *tcm = NLMSG_DATA(n);
718 struct nlattr *tca[TCA_MAX + 1];
719 struct net_device *dev;
720 u32 clid = tcm->tcm_parent;
721 struct Qdisc *q = NULL;
722 struct Qdisc *p = NULL;
723 int err;
724
725 if (net != &init_net)
726 return -EINVAL;
727
728 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
729 return -ENODEV;
730
731 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
732 if (err < 0)
733 return err;
734
735 if (clid) {
736 if (clid != TC_H_ROOT) {
737 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
738 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
739 return -ENOENT;
740 q = qdisc_leaf(p, clid);
741 } else { /* ingress */
742 q = dev->rx_queue.qdisc;
743 }
744 } else {
745 struct netdev_queue *dev_queue = &dev->tx_queue;
746 q = dev_queue->qdisc_sleeping;
747 }
748 if (!q)
749 return -ENOENT;
750
751 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
752 return -EINVAL;
753 } else {
754 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
755 return -ENOENT;
756 }
757
758 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
759 return -EINVAL;
760
761 if (n->nlmsg_type == RTM_DELQDISC) {
762 if (!clid)
763 return -EINVAL;
764 if (q->handle == 0)
765 return -ENOENT;
766 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
767 return err;
768 if (q) {
769 qdisc_notify(skb, n, clid, q, NULL);
770 qdisc_lock_tree(dev);
771 qdisc_destroy(q);
772 qdisc_unlock_tree(dev);
773 }
774 } else {
775 qdisc_notify(skb, n, clid, NULL, q);
776 }
777 return 0;
778 }
779
780 /*
781 Create/change qdisc.
782 */
783
784 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
785 {
786 struct net *net = sock_net(skb->sk);
787 struct tcmsg *tcm;
788 struct nlattr *tca[TCA_MAX + 1];
789 struct net_device *dev;
790 u32 clid;
791 struct Qdisc *q, *p;
792 int err;
793
794 if (net != &init_net)
795 return -EINVAL;
796
797 replay:
798 /* Reinit, just in case something touches this. */
799 tcm = NLMSG_DATA(n);
800 clid = tcm->tcm_parent;
801 q = p = NULL;
802
803 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
804 return -ENODEV;
805
806 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
807 if (err < 0)
808 return err;
809
810 if (clid) {
811 if (clid != TC_H_ROOT) {
812 if (clid != TC_H_INGRESS) {
813 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
814 return -ENOENT;
815 q = qdisc_leaf(p, clid);
816 } else { /*ingress */
817 q = dev->rx_queue.qdisc;
818 }
819 } else {
820 struct netdev_queue *dev_queue = &dev->tx_queue;
821 q = dev_queue->qdisc_sleeping;
822 }
823
824 /* It may be default qdisc, ignore it */
825 if (q && q->handle == 0)
826 q = NULL;
827
828 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
829 if (tcm->tcm_handle) {
830 if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
831 return -EEXIST;
832 if (TC_H_MIN(tcm->tcm_handle))
833 return -EINVAL;
834 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
835 goto create_n_graft;
836 if (n->nlmsg_flags&NLM_F_EXCL)
837 return -EEXIST;
838 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
839 return -EINVAL;
840 if (q == p ||
841 (p && check_loop(q, p, 0)))
842 return -ELOOP;
843 atomic_inc(&q->refcnt);
844 goto graft;
845 } else {
846 if (q == NULL)
847 goto create_n_graft;
848
849 /* This magic test requires explanation.
850 *
851 * We know that some child qdisc q is already
852 * attached to this parent and we have a choice:
853 * either to change it or to create/graft a new one.
854 *
855 * 1. We are allowed to create/graft only
856 * if both CREATE and REPLACE flags are set.
857 *
858 * 2. If EXCL is set, the requestor meant that a qdisc
859 * with handle tcm_handle is not expected
860 * to exist, so we choose create/graft as well.
861 *
862 * 3. The last case is when no flags are set.
863 * Alas, this is a sort of hole in the API; we
864 * cannot decide what to do unambiguously.
865 * For now we select create/graft if the
866 * user gave a KIND which does not match the existing one.
867 */
868 if ((n->nlmsg_flags&NLM_F_CREATE) &&
869 (n->nlmsg_flags&NLM_F_REPLACE) &&
870 ((n->nlmsg_flags&NLM_F_EXCL) ||
871 (tca[TCA_KIND] &&
872 nla_strcmp(tca[TCA_KIND], q->ops->id))))
873 goto create_n_graft;
874 }
875 }
876 } else {
877 if (!tcm->tcm_handle)
878 return -EINVAL;
879 q = qdisc_lookup(dev, tcm->tcm_handle);
880 }
881
882 /* Change qdisc parameters */
883 if (q == NULL)
884 return -ENOENT;
885 if (n->nlmsg_flags&NLM_F_EXCL)
886 return -EEXIST;
887 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
888 return -EINVAL;
889 err = qdisc_change(q, tca);
890 if (err == 0)
891 qdisc_notify(skb, n, clid, NULL, q);
892 return err;
893
894 create_n_graft:
895 if (!(n->nlmsg_flags&NLM_F_CREATE))
896 return -ENOENT;
897 if (clid == TC_H_INGRESS)
898 q = qdisc_create(dev, &dev->rx_queue,
899 tcm->tcm_parent, tcm->tcm_parent,
900 tca, &err);
901 else
902 q = qdisc_create(dev, &dev->tx_queue,
903 tcm->tcm_parent, tcm->tcm_handle,
904 tca, &err);
905 if (q == NULL) {
906 if (err == -EAGAIN)
907 goto replay;
908 return err;
909 }
910
911 graft:
912 if (1) {
913 struct Qdisc *old_q = NULL;
914 err = qdisc_graft(dev, p, clid, q, &old_q);
915 if (err) {
916 if (q) {
917 qdisc_lock_tree(dev);
918 qdisc_destroy(q);
919 qdisc_unlock_tree(dev);
920 }
921 return err;
922 }
923 qdisc_notify(skb, n, clid, old_q, q);
924 if (old_q) {
925 qdisc_lock_tree(dev);
926 qdisc_destroy(old_q);
927 qdisc_unlock_tree(dev);
928 }
929 }
930 return 0;
931 }
932
933 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
934 u32 pid, u32 seq, u16 flags, int event)
935 {
936 struct tcmsg *tcm;
937 struct nlmsghdr *nlh;
938 unsigned char *b = skb_tail_pointer(skb);
939 struct gnet_dump d;
940
941 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
942 tcm = NLMSG_DATA(nlh);
943 tcm->tcm_family = AF_UNSPEC;
944 tcm->tcm__pad1 = 0;
945 tcm->tcm__pad2 = 0;
946 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
947 tcm->tcm_parent = clid;
948 tcm->tcm_handle = q->handle;
949 tcm->tcm_info = atomic_read(&q->refcnt);
950 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
951 if (q->ops->dump && q->ops->dump(q, skb) < 0)
952 goto nla_put_failure;
953 q->qstats.qlen = q->q.qlen;
954
955 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
956 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
957 goto nla_put_failure;
958
959 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
960 goto nla_put_failure;
961
962 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
963 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
964 gnet_stats_copy_queue(&d, &q->qstats) < 0)
965 goto nla_put_failure;
966
967 if (gnet_stats_finish_copy(&d) < 0)
968 goto nla_put_failure;
969
970 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
971 return skb->len;
972
973 nlmsg_failure:
974 nla_put_failure:
975 nlmsg_trim(skb, b);
976 return -1;
977 }
978
979 static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
980 u32 clid, struct Qdisc *old, struct Qdisc *new)
981 {
982 struct sk_buff *skb;
983 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
984
985 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
986 if (!skb)
987 return -ENOBUFS;
988
989 if (old && old->handle) {
990 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
991 goto err_out;
992 }
993 if (new) {
994 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
995 goto err_out;
996 }
997
998 if (skb->len)
999 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1000
1001 err_out:
1002 kfree_skb(skb);
1003 return -EINVAL;
1004 }
1005
1006 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1007 {
1008 struct net *net = sock_net(skb->sk);
1009 int idx, q_idx;
1010 int s_idx, s_q_idx;
1011 struct net_device *dev;
1012 struct Qdisc *q;
1013
1014 if (net != &init_net)
1015 return 0;
1016
1017 s_idx = cb->args[0];
1018 s_q_idx = q_idx = cb->args[1];
1019 read_lock(&dev_base_lock);
1020 idx = 0;
1021 for_each_netdev(&init_net, dev) {
1022 struct netdev_queue *dev_queue;
1023 if (idx < s_idx)
1024 goto cont;
1025 if (idx > s_idx)
1026 s_q_idx = 0;
1027 q_idx = 0;
1028 dev_queue = &dev->tx_queue;
1029 list_for_each_entry(q, &dev_queue->qdisc_list, list) {
1030 if (q_idx < s_q_idx) {
1031 q_idx++;
1032 continue;
1033 }
1034 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1035 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1036 goto done;
1037 q_idx++;
1038 }
1039 cont:
1040 idx++;
1041 }
1042
1043 done:
1044 read_unlock(&dev_base_lock);
1045
1046 cb->args[0] = idx;
1047 cb->args[1] = q_idx;
1048
1049 return skb->len;
1050 }
1051
1052
1053
1054 /************************************************
1055 * Traffic classes manipulation. *
1056 ************************************************/
1057
1058
1059
1060 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1061 {
1062 struct net *net = sock_net(skb->sk);
1063 struct netdev_queue *dev_queue;
1064 struct tcmsg *tcm = NLMSG_DATA(n);
1065 struct nlattr *tca[TCA_MAX + 1];
1066 struct net_device *dev;
1067 struct Qdisc *q = NULL;
1068 const struct Qdisc_class_ops *cops;
1069 unsigned long cl = 0;
1070 unsigned long new_cl;
1071 u32 pid = tcm->tcm_parent;
1072 u32 clid = tcm->tcm_handle;
1073 u32 qid = TC_H_MAJ(clid);
1074 int err;
1075
1076 if (net != &init_net)
1077 return -EINVAL;
1078
1079 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
1080 return -ENODEV;
1081
1082 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1083 if (err < 0)
1084 return err;
1085
1086 /*
1087 parent == TC_H_UNSPEC - unspecified parent.
1088 parent == TC_H_ROOT - class is root, which has no parent.
1089 parent == X:0 - parent is root class.
1090 parent == X:Y - parent is a node in hierarchy.
1091 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1092
1093 handle == 0:0 - generate handle from kernel pool.
1094 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1095 handle == X:Y - clear.
1096 handle == X:0 - root class.
1097 */
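/*
   Editor's worked example (illustration only): the user-space handle "1:10"
   is parsed by tc as hexadecimal, giving the 32-bit value 0x00010010.
   TC_H_MAJ() keeps the upper 16 bits (0x00010000), TC_H_MIN() the lower
   16 bits (0x00000010), and TC_H_MAKE(0x00010000, 0x10) reassembles the
   full handle.
 */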
1098
1099 /* Step 1. Determine qdisc handle X:0 */
1100
1101 dev_queue = &dev->tx_queue;
1102 if (pid != TC_H_ROOT) {
1103 u32 qid1 = TC_H_MAJ(pid);
1104
1105 if (qid && qid1) {
1106 /* If both majors are known, they must be identical. */
1107 if (qid != qid1)
1108 return -EINVAL;
1109 } else if (qid1) {
1110 qid = qid1;
1111 } else if (qid == 0)
1112 qid = dev_queue->qdisc_sleeping->handle;
1113
1114 /* Now qid is a genuine qdisc handle consistent
1115    with both parent and child.
1116
1117    TC_H_MAJ(pid) may still be unspecified; complete it now.
1118  */
1119 if (pid)
1120 pid = TC_H_MAKE(qid, pid);
1121 } else {
1122 if (qid == 0)
1123 qid = dev_queue->qdisc_sleeping->handle;
1124 }
1125
1126 /* OK. Locate qdisc */
1127 if ((q = qdisc_lookup(dev, qid)) == NULL)
1128 return -ENOENT;
1129
1130 /* And check that it supports classes */
1131 cops = q->ops->cl_ops;
1132 if (cops == NULL)
1133 return -EINVAL;
1134
1135 /* Now try to get class */
1136 if (clid == 0) {
1137 if (pid == TC_H_ROOT)
1138 clid = qid;
1139 } else
1140 clid = TC_H_MAKE(qid, clid);
1141
1142 if (clid)
1143 cl = cops->get(q, clid);
1144
1145 if (cl == 0) {
1146 err = -ENOENT;
1147 if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
1148 goto out;
1149 } else {
1150 switch (n->nlmsg_type) {
1151 case RTM_NEWTCLASS:
1152 err = -EEXIST;
1153 if (n->nlmsg_flags&NLM_F_EXCL)
1154 goto out;
1155 break;
1156 case RTM_DELTCLASS:
1157 err = cops->delete(q, cl);
1158 if (err == 0)
1159 tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
1160 goto out;
1161 case RTM_GETTCLASS:
1162 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
1163 goto out;
1164 default:
1165 err = -EINVAL;
1166 goto out;
1167 }
1168 }
1169
1170 new_cl = cl;
1171 err = cops->change(q, clid, pid, tca, &new_cl);
1172 if (err == 0)
1173 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
1174
1175 out:
1176 if (cl)
1177 cops->put(q, cl);
1178
1179 return err;
1180 }
1181
1182
1183 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1184 unsigned long cl,
1185 u32 pid, u32 seq, u16 flags, int event)
1186 {
1187 struct tcmsg *tcm;
1188 struct nlmsghdr *nlh;
1189 unsigned char *b = skb_tail_pointer(skb);
1190 struct gnet_dump d;
1191 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1192
1193 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1194 tcm = NLMSG_DATA(nlh);
1195 tcm->tcm_family = AF_UNSPEC;
1196 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1197 tcm->tcm_parent = q->handle;
1198 tcm->tcm_handle = q->handle;
1199 tcm->tcm_info = 0;
1200 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
1201 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1202 goto nla_put_failure;
1203
1204 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
1205 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
1206 goto nla_put_failure;
1207
1208 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1209 goto nla_put_failure;
1210
1211 if (gnet_stats_finish_copy(&d) < 0)
1212 goto nla_put_failure;
1213
1214 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1215 return skb->len;
1216
1217 nlmsg_failure:
1218 nla_put_failure:
1219 nlmsg_trim(skb, b);
1220 return -1;
1221 }
1222
1223 static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1224 struct Qdisc *q, unsigned long cl, int event)
1225 {
1226 struct sk_buff *skb;
1227 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1228
1229 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1230 if (!skb)
1231 return -ENOBUFS;
1232
1233 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1234 kfree_skb(skb);
1235 return -EINVAL;
1236 }
1237
1238 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1239 }
1240
1241 struct qdisc_dump_args
1242 {
1243 struct qdisc_walker w;
1244 struct sk_buff *skb;
1245 struct netlink_callback *cb;
1246 };
1247
1248 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1249 {
1250 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1251
1252 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1253 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1254 }
1255
1256 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1257 {
1258 struct net *net = sock_net(skb->sk);
1259 struct netdev_queue *dev_queue;
1260 int t;
1261 int s_t;
1262 struct net_device *dev;
1263 struct Qdisc *q;
1264 struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
1265 struct qdisc_dump_args arg;
1266
1267 if (net != &init_net)
1268 return 0;
1269
1270 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1271 return 0;
1272 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
1273 return 0;
1274
1275 s_t = cb->args[0];
1276 t = 0;
1277
1278 dev_queue = &dev->tx_queue;
1279 list_for_each_entry(q, &dev_queue->qdisc_list, list) {
1280 if (t < s_t || !q->ops->cl_ops ||
1281 (tcm->tcm_parent &&
1282 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1283 t++;
1284 continue;
1285 }
1286 if (t > s_t)
1287 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1288 arg.w.fn = qdisc_class_dump;
1289 arg.skb = skb;
1290 arg.cb = cb;
1291 arg.w.stop = 0;
1292 arg.w.skip = cb->args[1];
1293 arg.w.count = 0;
1294 q->ops->cl_ops->walk(q, &arg.w);
1295 cb->args[1] = arg.w.count;
1296 if (arg.w.stop)
1297 break;
1298 t++;
1299 }
1300
1301 cb->args[0] = t;
1302
1303 dev_put(dev);
1304 return skb->len;
1305 }
1306
1307 /* Main classifier routine: scans the classifier chain attached
1308    to this qdisc, (optionally) tests for the protocol and asks
1309    the specific classifiers.
1310  */
1311 int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
1312 struct tcf_result *res)
1313 {
1314 __be16 protocol = skb->protocol;
1315 int err = 0;
1316
1317 for (; tp; tp = tp->next) {
1318 if ((tp->protocol == protocol ||
1319 tp->protocol == htons(ETH_P_ALL)) &&
1320 (err = tp->classify(skb, tp, res)) >= 0) {
1321 #ifdef CONFIG_NET_CLS_ACT
1322 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1323 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1324 #endif
1325 return err;
1326 }
1327 }
1328 return -1;
1329 }
1330 EXPORT_SYMBOL(tc_classify_compat);
1331
1332 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
1333 struct tcf_result *res)
1334 {
1335 int err = 0;
1336 __be16 protocol;
1337 #ifdef CONFIG_NET_CLS_ACT
1338 struct tcf_proto *otp = tp;
1339 reclassify:
1340 #endif
1341 protocol = skb->protocol;
1342
1343 err = tc_classify_compat(skb, tp, res);
1344 #ifdef CONFIG_NET_CLS_ACT
1345 if (err == TC_ACT_RECLASSIFY) {
1346 u32 verd = G_TC_VERD(skb->tc_verd);
1347 tp = otp;
1348
1349 if (verd++ >= MAX_REC_LOOP) {
1350 printk("rule prio %u protocol %02x reclassify loop, "
1351 "packet dropped\n",
1352 tp->prio&0xffff, ntohs(tp->protocol));
1353 return TC_ACT_SHOT;
1354 }
1355 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1356 goto reclassify;
1357 }
1358 #endif
1359 return err;
1360 }
1361 EXPORT_SYMBOL(tc_classify);
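/*
 * Editor's illustration (not part of the original file): how a classful
 * qdisc's classify step typically consumes the verdict from tc_classify(),
 * following the pattern of prio_classify() in sch_prio.c.  "q->filter_list"
 * and "*qerr" belong to that hypothetical caller.
 *
 *	struct tcf_result res;
 *
 *	*qerr = NET_XMIT_BYPASS;
 *	switch (tc_classify(skb, q->filter_list, &res)) {
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *		*qerr = NET_XMIT_SUCCESS;	(fall through: packet consumed)
 *	case TC_ACT_SHOT:
 *		return NULL;			(do not enqueue here)
 *	}
 *	... otherwise use res.classid to pick the destination class ...
 */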
1362
1363 void tcf_destroy(struct tcf_proto *tp)
1364 {
1365 tp->ops->destroy(tp);
1366 module_put(tp->ops->owner);
1367 kfree(tp);
1368 }
1369
1370 void tcf_destroy_chain(struct tcf_proto **fl)
1371 {
1372 struct tcf_proto *tp;
1373
1374 while ((tp = *fl) != NULL) {
1375 *fl = tp->next;
1376 tcf_destroy(tp);
1377 }
1378 }
1379 EXPORT_SYMBOL(tcf_destroy_chain);
1380
1381 #ifdef CONFIG_PROC_FS
1382 static int psched_show(struct seq_file *seq, void *v)
1383 {
1384 struct timespec ts;
1385
1386 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1387 seq_printf(seq, "%08x %08x %08x %08x\n",
1388 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
1389 1000000,
1390 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1391
1392 return 0;
1393 }
1394
1395 static int psched_open(struct inode *inode, struct file *file)
1396 {
1397 return single_open(file, psched_show, PDE(inode)->data);
1398 }
1399
1400 static const struct file_operations psched_fops = {
1401 .owner = THIS_MODULE,
1402 .open = psched_open,
1403 .read = seq_read,
1404 .llseek = seq_lseek,
1405 .release = single_release,
1406 };
1407 #endif
1408
1409 static int __init pktsched_init(void)
1410 {
1411 register_qdisc(&pfifo_qdisc_ops);
1412 register_qdisc(&bfifo_qdisc_ops);
1413 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1414
1415 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
1416 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
1417 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
1418 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
1419 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
1420 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);
1421
1422 return 0;
1423 }
1424
1425 subsys_initcall(pktsched_init);