Revert "UBI: use mtd->writebufsize to set minimal I/O unit size"
[deliverable/linux.git] / net / sched / sch_htb.c
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
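
/* Illustrative userspace configuration (an assumed typical setup, not part
   of this file): a two-level hierarchy built with the tc tool, where 1:1 is
   an interior node, 1:10 and 1:20 are leaves (level 0), and unclassified
   traffic falls back to the "default 20" class:

	tc qdisc add dev eth0 root handle 1: htb default 20
	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit
*/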

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	struct Qdisc_class_common common;
	/* general class parameters */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	int prio;		/* these two are used only by leaves... */
	int quantum;		/* but stored for parent-to-leaf return */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lose the ptr value and start from
			   the first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	struct work_struct work;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * we finish and return the direct queue.
 */
#define HTB_DIRECT	((struct htb_class *)-1)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is a safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask as parent is ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to
				   from parent feed - forget the pointer but
				   remember classid */
				p->un.inner.last_ptr_id[prio] = cl->common.classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

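/* Descriptive note: with htb_hysteresis enabled, the two helpers below give
 * htb_class_mode() asymmetric thresholds. A class that can currently send
 * only falls into HTB_CANT_SEND once its ctokens drop below -cbuffer, and a
 * class already in HTB_CAN_SEND keeps that mode until its tokens drop below
 * -buffer; recovering the better mode still requires reaching 0. This slack
 * trades shaping accuracy for fewer mode transitions (see htb_class_mode).
 */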
static inline long htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

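/* Descriptive note: enqueue has three outcomes. Packets classified as
 * HTB_DIRECT bypass shaping via the bounded direct_queue (or are dropped
 * when it is full); a failed classification (NULL, only possible with
 * NET_CLS_ACT) drops the skb; otherwise the skb goes to the leaf's own
 * qdisc and the leaf is (re)activated so dequeue will visit it.
 */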
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			cl->qstats.drops++;
		}
		return ret;
	} else {
		bstats_update(&cl->bstats, skb);
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	qdisc_bstats_update(sch, skb);
	return NET_XMIT_SUCCESS;
}

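/* Descriptive note: the two helpers below refill a token bucket with the
 * credit accumulated since the last checkpoint (diff), cap it at the
 * configured bucket depth, then charge the packet's transmission time taken
 * from the rate table; the result is clamped so the deficit never exceeds
 * mbuffer.
 */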
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (long) qdisc_l2t(cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (long) qdisc_l2t(cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock here than the event
 * queue does. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	long diff;

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: only events with cl->pq_key <= q->now are applied.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level,
				   unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	   1 to simplify things when jiffy is going to be incremented
	   too soon */
	unsigned long stop_at = start + 2;
	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		printk(KERN_WARNING "htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
   if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Finds the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node((level ? cl->parent->un.inner.ptr :
				  q->ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
					  q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this arrangement
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

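/* Descriptive note: dequeue serves the direct queue first, then walks levels
 * from the leaves (level 0) upward. At each level it applies any pending
 * mode-change events and tries every priority that has active classes; when
 * nothing is eligible it arms the watchdog for the nearest future event, or
 * punts to the workqueue if events are already overdue.
 */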
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();
	start_at = jiffies;

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;

		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

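/* Descriptive note: scheduled from htb_dequeue() when pending events are
 * already overdue (htb_do_events() exhausted its 2-jiffy budget); it kicks
 * the root qdisc so dequeue, and with it event processing, is retried
 * rather than busy-looping inside the dequeue path.
 */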
static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	__netif_schedule(qdisc_root(sch));
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_INIT + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_INIT] == NULL) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	spin_lock_bh(root_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	spin_lock_bh(root_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

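/* Descriptive note: graft is only meaningful on leaves; it swaps the leaf's
 * child qdisc, creating a default pfifo when none is supplied, and flushes
 * the old child under the tree lock.
 */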
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	sch_tree_lock(sch);
	*old = cl->un.leaf.q;
	cl->un.leaf.q = new;
	if (*old != NULL) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

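/* Descriptive note: when the last child of an inner class is deleted, the
 * parent is converted back into a leaf: it gets new_q (or noop_qdisc) as
 * its own queue, a refilled token bucket and HTB_CAN_SEND mode.
 */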
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why not allow deleting a subtree? references? does the
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

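/* Descriptive note: this single entry point both creates classes (*arg == 0)
 * and changes existing ones. Creating a child under a leaf converts that
 * leaf into an inner node: its queue is destroyed and its packets are lost,
 * which is why the old leaf qdisc is flushed under the tree lock below.
 */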
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct nlattr *tb[__TCA_HTB_MAX];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE] ? : &est.nla);
		if (err) {
			kfree(cl);
			goto failure;
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		   so it can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");