/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
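
/* Illustrative example (assuming TC_HTB_MAXDEPTH == 8, as defined in
   linux/pkt_sched.h): in a hierarchy  root -> inner -> leaf  the root sits
   at level 7 and the inner node at level 6, while the leaf stays at
   level 0 no matter how shallow the tree actually is. */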

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects
			   from its parent's feed then we lose the ptr value
			   and start from the first child again. Here we store
			   the classid of the last valid ptr (used when ptr is
			   NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non work conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* backup of un.leaf.prio; used when a parent */
	int quantum;		/* reverts to a leaf. A full replacement of the
				   un.leaf originals should eventually be done. */
};

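/* L2T ("length to time"): converts a packet of "size" bytes into its token
   cost using the class's qdisc rate table. Slot indexes above 255 fall off
   the table, so the cost is approximated from the overflow count times the
   last entry plus the low-byte entry. */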
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255)
		return (rate->data[255] * (slot >> 8) + rate->data[slot & 0xFF]);
	return rate->data[slot];
}

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	/* whether we hit a non-work-conserving class during this dequeue; we
	   use this to suppress the mindelay complaint in dequeue */
	int nwc_hit;

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into a class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by a valid classid in
 * skb->priority. Then we examine filters in the qdisc and in inner nodes
 * (if a higher filter points to the inner node). If we end up with classid
 * MAJOR:0 we enqueue the skb into the special internal fifo (direct). These
 * packets then go directly thru. If we still have no valid leaf we try to
 * use the MAJOR:default leaf. If that is unsuccessful too, we finish and
 * return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit a leaf; return it */

		/* we have got an inner class; apply the inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use the default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is a safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds the class to the list (actually tree) sorted by classid.
 * Make sure that the class is not already on such a list for the given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the class
 * will change its mode at time cl->pq_key. Make sure that the class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds the next node in a binary tree
 *
 * When we are past the last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates an active class's feed chain
 *
 * The class is connected to its ancestors and/or the appropriate rows
 * for the priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask as the parent is
				   already OK */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing a child which is pointed
				   to from its parent feed - forget the
				   pointer but remember the classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * It is also worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

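/* Illustrative example of the hysteresis above (made-up numbers): with
   HTB_HYSTERESIS enabled and cl->buffer == 10000, a class currently in
   HTB_CAN_SEND keeps that mode until tokens + diff falls below -10000
   (htb_hiwater), instead of flipping as soon as the sum goes below 0;
   this is what bounds the mode-transition rate. */
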
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate. The
 * new mode should be different from the old one and cl->pq_key has to be
 * valid if changing to a mode other than HTB_CAN_SEND
 * (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns the (new) priority of the leaf and activates the feed
 * chain for that prio. It can safely be called on an already active leaf.
 * It also adds the leaf into the droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

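/* Enqueue path: classify the skb, then either put it on the special direct
   queue (X:0), drop it, or hand it to the leaf class's own qdisc and
   activate that leaf so the dequeue side will visit it. */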
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing a packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 * the leaf and all ancestors and to the rate bucket for ancestors at levels
 * "level" and higher. It also handles a possible change of mode resulting
 * from the update. Note that the mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = skb->len;
	long toks, diff;
	enum htb_cmode old_mode;

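/* HTB_ACCNT(T,B,R) below is a generic token-bucket update used for both the
   rate bucket (tokens/buffer/rate) and the ceil bucket (ctokens/cbuffer/
   ceil): refill by "diff", clamp to the bucket depth B, charge the packet
   via L2T() and bound the deficit at about -mbuffer. */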
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1 - cl->mbuffer; \
	cl->T = toks

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb) ?
				skb_shinfo(skb)->gso_segs : 1;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * time of the next pending event (0 if there is none in the pq).
 * Note: only events with cl->pq_key <= q->now are applied.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
   Returns NULL if no such class exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but the id is valid - try to
			   recover the original or next ptr */
			*sp->pptr =
				htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now; remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at the right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}

/* dequeues a packet at the given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look up the initial class in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if the
		   leaf qdisc drops packets in its enqueue routine or if
		   someone used the graft operation on the leaf since the
		   last dequeue; simply deactivate and skip such a class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr :
				  q->ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
					  q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

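/* Main dequeue: serve the direct queue first, then walk the levels from
   the leaves upward, firing due mode-change events per level and pulling
   from the best-priority non-empty row; if nothing is eligible, arm the
   watchdog for the nearest future event. */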
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (event && next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

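/* Qdisc setup: parses the nested TCA_HTB_INIT attribute (struct
   tc_htb_glob) supplied by tc and initializes the hash, drop lists,
   watchdog and direct queue. For example, with the iproute2 tc tool
   (illustrative invocation):

	tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10

   would arrive here with gopt->defcls == 0x20 and gopt->rate2quantum == 10. */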
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

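/* Dumps the qdisc-level options (struct tc_htb_glob) back to userspace
   under the device queue lock; on rtattr overflow the partial attribute
   is trimmed again in the rtattr_failure path. */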
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, skb_tail_pointer(skb));
	return -1;
}

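/* Per-class dump: fills the tcmsg header with parent/handle/leaf-qdisc
   linkage and emits the token-bucket parameters as a TCA_HTB_PARMS
   attribute. */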
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

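/* Grafts a new leaf qdisc onto a leaf class (creating a default pfifo when
   new is NULL) and hands the old one back to the caller via *old. */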
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					     cl->classid)) == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

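/* Returns 1 iff cl is its parent's only child, i.e. deleting cl would
   leave the parent childless so it can revert to being a leaf. */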
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;

	if (!(cl->parent->children.next == &cl->sibling &&
	      cl->parent->children.prev == &cl->sibling))
		/* not the last child */
		return 0;

	return 1;
}

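/* Turns cl's parent back into a leaf: restores the backed-up prio/quantum,
   attaches new_q (or noop_qdisc) as its leaf qdisc and refills both token
   buckets so the class restarts in HTB_CAN_SEND. */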
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

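/* Recursively frees a class: destroys its leaf qdisc, rate tables, filter
   chain and children, then unlinks it from the hash, sibling list, active
   feeds and the wait queue. */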
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without an Oops). */
	tcf_destroy_chain(q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; the remainder is in destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

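/* Creates a new class or reconfigures an existing one from TCA_HTB_PARMS
   (struct tc_htb_opt) plus the rate/ceil tables. A sketch of typical
   userspace usage with the iproute2 tc tool (illustrative values):

	tc class add dev eth0 parent 1: classid 1:10 htb \
		rate 1mbit ceil 2mbit burst 10k prio 0

   is what ends up being parsed here. */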
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct rtattr rta;
			struct gnet_estimator opt;
		} est = {
			.rta = {
				.rta_len = RTA_LENGTH(sizeof(est.opt)),
				.rta_type = TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval = 2,
				.ewma_log = 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock,
				  tca[TCA_RATE-1] ? : &est.rta);
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses
		   kmalloc(GFP_KERNEL), which can't be used inside
		   sch_tree_lock() -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else {
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		sch_tree_lock(sch);
	}

	/* there used to be a nasty bug here: we have to check that the node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get the
	   class for other reasons, so we have to allow it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

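/* Iterates over every class in the hash for the class walker API,
   honouring the skip/count/stop protocol of struct qdisc_walker. */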
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");