[HTB]: Remove broken debug code.
1/* vim: ts=8 sw=8
2 * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Martin Devera, <devik@cdi.cz>
10 *
11 * Credits (in time order) for older HTB versions:
12 * Stef Coene <stef.coene@docum.org>
13 * HTB support at LARTC mailing list
14 * Ondrej Kraus, <krauso@barr.cz>
15 * found missing INIT_QDISC(htb)
16 * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17 * helped a lot to locate nasty class stall bug
18 * Andi Kleen, Jamal Hadi, Bert Hubert
19 * code review and helpful comments on shaping
20 * Tomasz Wrona, <tw@eter.tym.pl>
21 * created test case so that I was able to fix nasty bug
22 * Wilfried Weissmann
23 * spotted bug in dequeue code and helped with fix
24 * Jiri Fojtasek
25 * fixed requeue routine
26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */
30#include <linux/module.h>
31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <linux/bitops.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/sched.h>
37#include <linux/string.h>
38#include <linux/mm.h>
39#include <linux/socket.h>
40#include <linux/sockios.h>
41#include <linux/in.h>
42#include <linux/errno.h>
43#include <linux/interrupt.h>
44#include <linux/if_ether.h>
45#include <linux/inet.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/notifier.h>
49#include <net/ip.h>
50#include <net/route.h>
51#include <linux/skbuff.h>
52#include <linux/list.h>
53#include <linux/compiler.h>
54#include <net/sock.h>
55#include <net/pkt_sched.h>
56#include <linux/rbtree.h>
57
58/* HTB algorithm.
59 Author: devik@cdi.cz
60 ========================================================================
61 HTB is like TBF with multiple classes. It is also similar to CBQ because
62 it allows assigning a priority to each class in the hierarchy.
63 In fact it is another implementation of Floyd's formal sharing.
64
65 Levels:
66 Each class is assigned a level. Leaves always have level 0 and root
67 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
68 one less than their parent.
69*/
70
71#define HTB_HSIZE 16 /* classid hash size */
72#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
73#define HTB_RATECM 1 /* whether to use rate computer */
74#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
75#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
76#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
77#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
78
79#if HTB_VER >> 16 != TC_HTB_PROTOVER
80#error "Mismatched sch_htb.c and pkt_sch.h"
81#endif
82
83/* used internally to keep status of single class */
84enum htb_cmode {
85 HTB_CANT_SEND, /* class can't send and can't borrow */
86 HTB_MAY_BORROW, /* class can't send but may borrow */
87 HTB_CAN_SEND /* class can send */
88};
89
90/* interior & leaf nodes; props specific to leaves are marked L: */
91struct htb_class
92{
93 /* general class parameters */
94 u32 classid;
95 struct gnet_stats_basic bstats;
96 struct gnet_stats_queue qstats;
97 struct gnet_stats_rate_est rate_est;
98 struct tc_htb_xstats xstats;/* our special stats */
99 int refcnt; /* usage count of this class */
100
101#ifdef HTB_RATECM
102 /* rate measurement counters */
103 unsigned long rate_bytes,sum_bytes;
104 unsigned long rate_packets,sum_packets;
105#endif
106
107 /* topology */
108 int level; /* our level (see above) */
109 struct htb_class *parent; /* parent class */
110 struct list_head hlist; /* classid hash list item */
111 struct list_head sibling; /* sibling list item */
112 struct list_head children; /* children list */
113
114 union {
115 struct htb_class_leaf {
116 struct Qdisc *q;
117 int prio;
118 int aprio;
119 int quantum;
120 int deficit[TC_HTB_MAXDEPTH];
121 struct list_head drop_list;
122 } leaf;
123 struct htb_class_inner {
124 struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
125 struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
126			/* When class changes from state 1->2 and disconnects from
127			   parent's feed then we lose the ptr value and start from the
128			   first child again. Here we store the classid of the
129			   last valid ptr (used when ptr is NULL). */
130 u32 last_ptr_id[TC_HTB_NUMPRIO];
131 } inner;
132 } un;
133 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
134 struct rb_node pq_node; /* node for event queue */
135 unsigned long pq_key; /* the same type as jiffies global */
136
137 int prio_activity; /* for which prios are we active */
138 enum htb_cmode cmode; /* current mode of the class */
139
140 /* class attached filters */
141 struct tcf_proto *filter_list;
142 int filter_cnt;
143
144 int warned; /* only one warning about non work conserving .. */
145
146 /* token bucket parameters */
147 struct qdisc_rate_table *rate; /* rate table of the class itself */
148 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
149 long buffer,cbuffer; /* token bucket depth/rate */
150	psched_tdiff_t mbuffer;		/* max wait time */
151 long tokens,ctokens; /* current number of tokens */
152 psched_time_t t_c; /* checkpoint time */
153};
154
155/* TODO: maybe compute rate when size is too large .. or drop ? */
156static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
157 int size)
158{
159 int slot = size >> rate->rate.cell_log;
160 if (slot > 255) {
161 cl->xstats.giants++;
162 slot = 255;
163 }
164 return rate->data[slot];
165}
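/*
 * Note: in the classic qdisc rate tables, rate->data[slot] holds the time
 * (in PSCHED ticks) needed to transmit a packet whose size falls into the
 * given cell, so L2T ("length to time") converts a packet length into the
 * token cost charged against the class's bucket. For example, with
 * cell_log = 3 a 1000-byte packet maps to slot 1000 >> 3 = 125.
 */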
166
167struct htb_sched
168{
169 struct list_head root; /* root classes list */
170 struct list_head hash[HTB_HSIZE]; /* hashed by classid */
171 struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
172
173 /* self list - roots of self generating tree */
174 struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
175 int row_mask[TC_HTB_MAXDEPTH];
176 struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
177 u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
178
179 /* self wait list - roots of wait PQs per row */
180 struct rb_root wait_pq[TC_HTB_MAXDEPTH];
181
182 /* time of nearest event per level (row) */
183 unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
184
185 /* cached value of jiffies in dequeue */
186 unsigned long jiffies;
187
188 /* whether we hit non-work conserving class during this dequeue; we use */
189 int nwc_hit; /* this to disable mindelay complaint in dequeue */
190
191 int defcls; /* class where unclassified flows go to */
192
193 /* filters for qdisc itself */
194 struct tcf_proto *filter_list;
195 int filter_cnt;
196
197 int rate2quantum; /* quant = rate / rate2quantum */
198 psched_time_t now; /* cached dequeue time */
199 struct timer_list timer; /* send delay timer */
200#ifdef HTB_RATECM
201 struct timer_list rttim; /* rate computer timer */
202 int recmp_bucket; /* which hash bucket to recompute next */
203#endif
204
205 /* non shaped skbs; let them go directly thru */
206 struct sk_buff_head direct_queue;
207 int direct_qlen; /* max qlen of above */
208
209 long direct_pkts;
210};
211
212/* compute hash of size HTB_HSIZE for given handle */
213static __inline__ int htb_hash(u32 h)
214{
215#if HTB_HSIZE != 16
216 #error "Declare new hash for your HTB_HSIZE"
217#endif
218 h ^= h>>8; /* stolen from cbq_hash */
219 h ^= h>>4;
220 return h & 0xf;
221}
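/*
 * Example: for class 1:1 (classid 0x00010001) the fold gives
 * 0x00010001 ^ 0x00000100 = 0x00010101, then ^ 0x00001010 = 0x00011111,
 * and 0x00011111 & 0xf = bucket 1; class 1:10 (0x0001000a) lands in
 * bucket 10 the same way.
 */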
222
223/* find class in global hash table using given handle */
224static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
225{
226 struct htb_sched *q = qdisc_priv(sch);
227 struct list_head *p;
228 if (TC_H_MAJ(handle) != sch->handle)
229 return NULL;
230
231 list_for_each (p,q->hash+htb_hash(handle)) {
232 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
233 if (cl->classid == handle)
234 return cl;
235 }
236 return NULL;
237}
238
239/**
240 * htb_classify - classify a packet into class
241 *
242 * It returns NULL if the packet should be dropped or -1 if the packet
243 * should be passed directly thru. In all other cases leaf class is returned.
244 * We allow direct class selection by classid in priority. Then we examine
245 * filters in qdisc and in inner nodes (if higher filter points to the inner
246 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
247 * internal fifo (direct). These packets then go directly thru. If we still
248 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
249 * we finish and return the direct queue.
250 */
251#define HTB_DIRECT (struct htb_class*)-1
252static inline u32 htb_classid(struct htb_class *cl)
253{
254 return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
255}
256
257static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
258{
259 struct htb_sched *q = qdisc_priv(sch);
260 struct htb_class *cl;
261 struct tcf_result res;
262 struct tcf_proto *tcf;
263 int result;
264
265	/* allow selecting a class by setting skb->priority to a valid classid;
266 note that nfmark can be used too by attaching filter fw with no
267 rules in it */
268 if (skb->priority == sch->handle)
269 return HTB_DIRECT; /* X:0 (direct flow) selected */
270 if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
271 return cl;
272
273	*qerr = NET_XMIT_BYPASS;
274 tcf = q->filter_list;
275 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
276#ifdef CONFIG_NET_CLS_ACT
277 switch (result) {
278 case TC_ACT_QUEUED:
279 case TC_ACT_STOLEN:
280 *qerr = NET_XMIT_SUCCESS;
281 case TC_ACT_SHOT:
282 return NULL;
283 }
284#elif defined(CONFIG_NET_CLS_POLICE)
285 if (result == TC_POLICE_SHOT)
286 return HTB_DIRECT;
287#endif
288 if ((cl = (void*)res.class) == NULL) {
289 if (res.classid == sch->handle)
290 return HTB_DIRECT; /* X:0 (direct flow) */
291 if ((cl = htb_find(res.classid,sch)) == NULL)
292 break; /* filter selected invalid classid */
293 }
294 if (!cl->level)
295 return cl; /* we hit leaf; return it */
296
297 /* we have got inner class; apply inner filter chain */
298 tcf = cl->filter_list;
299 }
300 /* classification failed; try to use default class */
301 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
302 if (!cl || cl->level)
303 return HTB_DIRECT; /* bad default .. this is safe bet */
304 return cl;
305}
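/*
 * Example: on an HTB qdisc with handle 1:, setting skb->priority to
 * 0x00010000 (1:0) sends the packet to the direct queue, while
 * 0x0001000a (1:10) selects leaf class 1:10 without consulting any
 * filters, assuming such a leaf exists; otherwise the filter chains
 * and then the default class are tried as described above.
 */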
306
307/**
308 * htb_add_to_id_tree - adds class to the round robin list
309 *
310 * Routine adds class to the list (actually tree) sorted by classid.
311 * Make sure that class is not already on such list for given prio.
312 */
313static void htb_add_to_id_tree (struct rb_root *root,
314 struct htb_class *cl,int prio)
315{
316 struct rb_node **p = &root->rb_node, *parent = NULL;
317
318 while (*p) {
319 struct htb_class *c; parent = *p;
320 c = rb_entry(parent, struct htb_class, node[prio]);
321
322 if (cl->classid > c->classid)
323 p = &parent->rb_right;
324 else
325 p = &parent->rb_left;
326 }
327 rb_link_node(&cl->node[prio], parent, p);
328 rb_insert_color(&cl->node[prio], root);
329}
330
331/**
332 * htb_add_to_wait_tree - adds class to the event queue with delay
333 *
334 * The class is added to priority event queue to indicate that class will
335 * change its mode in cl->pq_key microseconds. Make sure that class is not
336 * already in the queue.
337 */
338static void htb_add_to_wait_tree (struct htb_sched *q,
339				  struct htb_class *cl,long delay)
340{
341 struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
342
343 cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
344 if (cl->pq_key == q->jiffies)
345 cl->pq_key++;
346
347 /* update the nearest event cache */
348 if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
349 q->near_ev_cache[cl->level] = cl->pq_key;
350
351 while (*p) {
352 struct htb_class *c; parent = *p;
353 c = rb_entry(parent, struct htb_class, pq_node);
354 if (time_after_eq(cl->pq_key, c->pq_key))
355 p = &parent->rb_right;
356 else
357 p = &parent->rb_left;
358 }
359 rb_link_node(&cl->pq_node, parent, p);
360 rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
361}
362
363/**
364 * htb_next_rb_node - finds next node in binary tree
365 *
366 * When we are past last key we return NULL.
367 * Average complexity is 2 steps per call.
368 */
369static void htb_next_rb_node(struct rb_node **n)
370{
371 *n = rb_next(*n);
372}
373
374/**
375 * htb_add_class_to_row - add class to its row
376 *
377 * The class is added to row at priorities marked in mask.
378 * It does nothing if mask == 0.
379 */
380static inline void htb_add_class_to_row(struct htb_sched *q,
381 struct htb_class *cl,int mask)
382{
383 q->row_mask[cl->level] |= mask;
384 while (mask) {
385 int prio = ffz(~mask);
386 mask &= ~(1 << prio);
387		htb_add_to_id_tree(q->row[cl->level]+prio,cl,prio);
388 }
389}
390
391/**
392 * htb_remove_class_from_row - removes class from its row
393 *
394 * The class is removed from row at priorities marked in mask.
395 * It does nothing if mask == 0.
396 */
397static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
398 struct htb_class *cl,int mask)
399{
400 int m = 0;
401
402 while (mask) {
403 int prio = ffz(~mask);
404 mask &= ~(1 << prio);
405 if (q->ptr[cl->level][prio] == cl->node+prio)
406 htb_next_rb_node(q->ptr[cl->level]+prio);
407		rb_erase(cl->node + prio,q->row[cl->level]+prio);
408 if (!q->row[cl->level][prio].rb_node)
409 m |= 1 << prio;
410 }
411 q->row_mask[cl->level] &= ~m;
412}
413
414/**
415 * htb_activate_prios - creates active class's feed chain
416 *
417 * The class is connected to ancestors and/or appropriate rows
418 * for priorities it is participating on. cl->cmode must be new
419 * (activated) mode. It does nothing if cl->prio_activity == 0.
420 */
421static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
422{
423 struct htb_class *p = cl->parent;
424 long m,mask = cl->prio_activity;
425
426 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
427
428 m = mask; while (m) {
429 int prio = ffz(~m);
430 m &= ~(1 << prio);
431
432 if (p->un.inner.feed[prio].rb_node)
433 /* parent already has its feed in use so that
434 reset bit in mask as parent is already ok */
435 mask &= ~(1 << prio);
436
437			htb_add_to_id_tree(p->un.inner.feed+prio,cl,prio);
438		}
439 p->prio_activity |= mask;
440 cl = p; p = cl->parent;
441
442 }
443 if (cl->cmode == HTB_CAN_SEND && mask)
444 htb_add_class_to_row(q,cl,mask);
445}
446
447/**
448 * htb_deactivate_prios - remove class from feed chain
449 *
450 * cl->cmode must represent old mode (before deactivation). It does
451 * nothing if cl->prio_activity == 0. Class is removed from all feed
452 * chains and rows.
453 */
454static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
455{
456 struct htb_class *p = cl->parent;
457 long m,mask = cl->prio_activity;
458
459
460 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
461 m = mask; mask = 0;
462 while (m) {
463 int prio = ffz(~m);
464 m &= ~(1 << prio);
465
466 if (p->un.inner.ptr[prio] == cl->node+prio) {
467 /* we are removing child which is pointed to from
468 parent feed - forget the pointer but remember
469 classid */
470 p->un.inner.last_ptr_id[prio] = cl->classid;
471 p->un.inner.ptr[prio] = NULL;
472 }
473
474			rb_erase(cl->node + prio,p->un.inner.feed + prio);
475
476 if (!p->un.inner.feed[prio].rb_node)
477 mask |= 1 << prio;
478 }
479
480 p->prio_activity &= ~mask;
481 cl = p; p = cl->parent;
482
483 }
484 if (cl->cmode == HTB_CAN_SEND && mask)
485 htb_remove_class_from_row(q,cl,mask);
486}
487
488/**
489 * htb_class_mode - computes and returns current class mode
490 *
491 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
492 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
493 * from now to time when cl will change its state.
494 * Also it is worth noting that class mode doesn't change simply
495 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
496 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
497 * mode transitions per time unit. The speed gain is about 1/6.
498 */
499static __inline__ enum htb_cmode
500htb_class_mode(struct htb_class *cl,long *diff)
501{
502 long toks;
503
504 if ((toks = (cl->ctokens + *diff)) < (
505#if HTB_HYSTERESIS
506 cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
507#endif
508 0)) {
509 *diff = -toks;
510 return HTB_CANT_SEND;
511 }
512 if ((toks = (cl->tokens + *diff)) >= (
513#if HTB_HYSTERESIS
514 cl->cmode == HTB_CAN_SEND ? -cl->buffer :
515#endif
516 0))
517 return HTB_CAN_SEND;
518
519 *diff = -toks;
520 return HTB_MAY_BORROW;
521}
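/*
 * In words: with HTB_HYSTERESIS the thresholds depend on the current mode.
 * The class becomes HTB_CANT_SEND when ctokens+diff falls below -cbuffer
 * (or below 0 if it is already throttled), it is or becomes HTB_CAN_SEND
 * when tokens+diff is at least 0 (or at least -buffer if it is already in
 * CAN_SEND), and it is HTB_MAY_BORROW otherwise.  For the two non-sending
 * modes *diff is set to the token deficit, which callers convert into a
 * wait time for the event queue.
 */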
522
523/**
524 * htb_change_class_mode - changes classe's mode
525 *
526 * This should be the only way how to change classe's mode under normal
527 * cirsumstances. Routine will update feed lists linkage, change mode
528 * and add class to the wait event queue if appropriate. New mode should
529 * be different from old one and cl->pq_key has to be valid if changing
530 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
531 */
532static void
533htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
534{
535 enum htb_cmode new_mode = htb_class_mode(cl,diff);
536
537
538 if (new_mode == cl->cmode)
539 return;
540
541 if (cl->prio_activity) { /* not necessary: speed optimization */
542 if (cl->cmode != HTB_CANT_SEND)
543 htb_deactivate_prios(q,cl);
544 cl->cmode = new_mode;
545 if (new_mode != HTB_CANT_SEND)
546 htb_activate_prios(q,cl);
547 } else
548 cl->cmode = new_mode;
549}
550
551/**
552 * htb_activate - inserts leaf cl into appropriate active feeds
553 *
554 * Routine learns (new) priority of leaf and activates feed chain
555 * for the prio. It can be called on already active leaf safely.
556 * It also adds leaf into droplist.
557 */
558static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
559{
560 BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
561
562 if (!cl->prio_activity) {
563 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
564 htb_activate_prios(q,cl);
565 list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
566 }
567}
568
569/**
570 * htb_deactivate - remove leaf cl from active feeds
571 *
572 * Make sure that leaf is active. In other words it can't be called
573 * with non-active leaf. It also removes class from the drop list.
574 */
575static __inline__ void
576htb_deactivate(struct htb_sched *q,struct htb_class *cl)
577{
578 BUG_TRAP(cl->prio_activity);
579
580 htb_deactivate_prios(q,cl);
581 cl->prio_activity = 0;
582 list_del_init(&cl->un.leaf.drop_list);
583}
584
585static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
586{
587 int ret;
588 struct htb_sched *q = qdisc_priv(sch);
589 struct htb_class *cl = htb_classify(skb,sch,&ret);
590
591 if (cl == HTB_DIRECT) {
592 /* enqueue to helper queue */
593 if (q->direct_queue.qlen < q->direct_qlen) {
594 __skb_queue_tail(&q->direct_queue, skb);
595 q->direct_pkts++;
596 } else {
597 kfree_skb(skb);
598 sch->qstats.drops++;
599 return NET_XMIT_DROP;
600 }
601#ifdef CONFIG_NET_CLS_ACT
602 } else if (!cl) {
603		if (ret == NET_XMIT_BYPASS)
604 sch->qstats.drops++;
605 kfree_skb (skb);
606 return ret;
607#endif
608 } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
609 sch->qstats.drops++;
610 cl->qstats.drops++;
611 return NET_XMIT_DROP;
612 } else {
613 cl->bstats.packets++; cl->bstats.bytes += skb->len;
614 htb_activate (q,cl);
615 }
616
617 sch->q.qlen++;
618 sch->bstats.packets++; sch->bstats.bytes += skb->len;
619 return NET_XMIT_SUCCESS;
620}
621
622/* TODO: requeuing packet charges it to policers again !! */
623static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
624{
625 struct htb_sched *q = qdisc_priv(sch);
626 int ret = NET_XMIT_SUCCESS;
627 struct htb_class *cl = htb_classify(skb,sch, &ret);
628 struct sk_buff *tskb;
629
630 if (cl == HTB_DIRECT || !cl) {
631 /* enqueue to helper queue */
632 if (q->direct_queue.qlen < q->direct_qlen && cl) {
633 __skb_queue_head(&q->direct_queue, skb);
634 } else {
635 __skb_queue_head(&q->direct_queue, skb);
636 tskb = __skb_dequeue_tail(&q->direct_queue);
637 kfree_skb (tskb);
638 sch->qstats.drops++;
639 return NET_XMIT_CN;
640 }
641 } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
642 sch->qstats.drops++;
643 cl->qstats.drops++;
644 return NET_XMIT_DROP;
645 } else
646 htb_activate (q,cl);
647
648 sch->q.qlen++;
649 sch->qstats.requeues++;
650 return NET_XMIT_SUCCESS;
651}
652
653static void htb_timer(unsigned long arg)
654{
655 struct Qdisc *sch = (struct Qdisc*)arg;
656 sch->flags &= ~TCQ_F_THROTTLED;
657 wmb();
658 netif_schedule(sch->dev);
659}
660
661#ifdef HTB_RATECM
662#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
663static void htb_rate_timer(unsigned long arg)
664{
665 struct Qdisc *sch = (struct Qdisc*)arg;
666 struct htb_sched *q = qdisc_priv(sch);
667 struct list_head *p;
668
669 /* lock queue so that we can muck with it */
670 HTB_QLOCK(sch);
671
672 q->rttim.expires = jiffies + HZ;
673 add_timer(&q->rttim);
674
675 /* scan and recompute one bucket at time */
676 if (++q->recmp_bucket >= HTB_HSIZE)
677 q->recmp_bucket = 0;
678 list_for_each (p,q->hash+q->recmp_bucket) {
679 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
680
681 RT_GEN (cl->sum_bytes,cl->rate_bytes);
682 RT_GEN (cl->sum_packets,cl->rate_packets);
683 }
684 HTB_QUNLOCK(sch);
685}
686#endif
687
688/**
689 * htb_charge_class - charges amount "bytes" to leaf and ancestors
690 *
691 * Routine assumes that packet "bytes" long was dequeued from leaf cl
692 * borrowing from "level". It accounts bytes to ceil leaky bucket for
693 * leaf and all ancestors and to rate bucket for ancestors at levels
694 * "level" and higher. It also handles possible change of mode resulting
695 * from the update. Note that mode can also increase here (MAY_BORROW to
696 * CAN_SEND) because we can use a more precise clock than the event queue here.
697 * In such case we remove class from event queue first.
698 */
699static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
700 int level,int bytes)
701{
702 long toks,diff;
703 enum htb_cmode old_mode;
704
705#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
706 if (toks > cl->B) toks = cl->B; \
707 toks -= L2T(cl, cl->R, bytes); \
708 if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
709 cl->T = toks
710
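/*
 * HTB_ACCNT(T,B,R) is the classic token-bucket update written as a macro so
 * the same code serves both buckets: it refills cl->T by the elapsed time
 * 'diff', clamps it to the bucket depth cl->B, subtracts the transmit time
 * of 'bytes' taken from rate table cl->R, and floors the result at
 * 1 - cl->mbuffer.  It is expanded below once for (tokens, buffer, rate)
 * and once for (ctokens, cbuffer, ceil).
 */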
711 while (cl) {
712		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
713 if (cl->level >= level) {
714 if (cl->level == level) cl->xstats.lends++;
715 HTB_ACCNT (tokens,buffer,rate);
716 } else {
717 cl->xstats.borrows++;
718 cl->tokens += diff; /* we moved t_c; update tokens */
719 }
720 HTB_ACCNT (ctokens,cbuffer,ceil);
721 cl->t_c = q->now;
722
723 old_mode = cl->cmode; diff = 0;
724 htb_change_class_mode(q,cl,&diff);
725 if (old_mode != cl->cmode) {
726 if (old_mode != HTB_CAN_SEND)
727				rb_erase(&cl->pq_node,q->wait_pq+cl->level);
728			if (cl->cmode != HTB_CAN_SEND)
729				htb_add_to_wait_tree (q,cl,diff);
730 }
731
732#ifdef HTB_RATECM
733 /* update rate counters */
734 cl->sum_bytes += bytes; cl->sum_packets++;
735#endif
736
737 /* update byte stats except for leaves which are already updated */
738 if (cl->level) {
739 cl->bstats.bytes += bytes;
740 cl->bstats.packets++;
741 }
742 cl = cl->parent;
743 }
744}
745
746/**
747 * htb_do_events - make mode changes to classes at the level
748 *
749 * Scans event queue for pending events and applies them. Returns jiffies to
750 * next pending event (0 for no event in pq).
751 * Note: Applied are events whose cl->pq_key <= jiffies.
752 */
753static long htb_do_events(struct htb_sched *q,int level)
754{
755 int i;
756
757 for (i = 0; i < 500; i++) {
758 struct htb_class *cl;
759 long diff;
760 struct rb_node *p = q->wait_pq[level].rb_node;
761 if (!p) return 0;
762 while (p->rb_left) p = p->rb_left;
763
764 cl = rb_entry(p, struct htb_class, pq_node);
765 if (time_after(cl->pq_key, q->jiffies)) {
766 return cl->pq_key - q->jiffies;
767 }
768		rb_erase(p,q->wait_pq+level);
769		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
770 htb_change_class_mode(q,cl,&diff);
771 if (cl->cmode != HTB_CAN_SEND)
772			htb_add_to_wait_tree (q,cl,diff);
773 }
774 if (net_ratelimit())
775 printk(KERN_WARNING "htb: too many events !\n");
776 return HZ/10;
777}
778
779/* Returns class->node+prio from id-tree where class's id is >= id. NULL
780   if no such one exists. */
781static struct rb_node *
782htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
783{
784 struct rb_node *r = NULL;
785 while (n) {
786 struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
787 if (id == cl->classid) return n;
788
789 if (id > cl->classid) {
790 n = n->rb_right;
791 } else {
792 r = n;
793 n = n->rb_left;
794 }
795 }
796 return r;
797}
798
799/**
800 * htb_lookup_leaf - returns next leaf class in DRR order
801 *
802 * Find the leaf the current feed pointer points to.
803 */
804static struct htb_class *
805htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
806{
807 int i;
808 struct {
809 struct rb_node *root;
810 struct rb_node **pptr;
811 u32 *pid;
812 } stk[TC_HTB_MAXDEPTH],*sp = stk;
813
814 BUG_TRAP(tree->rb_node);
815 sp->root = tree->rb_node;
816 sp->pptr = pptr;
817 sp->pid = pid;
818
819 for (i = 0; i < 65535; i++) {
820 if (!*sp->pptr && *sp->pid) {
821 /* ptr was invalidated but id is valid - try to recover
822 the original or next ptr */
823 *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
824 }
825		*sp->pid = 0; /* ptr is valid now so remove this hint as it
826 can become out of date quickly */
827 if (!*sp->pptr) { /* we are at right end; rewind & go up */
828 *sp->pptr = sp->root;
829 while ((*sp->pptr)->rb_left)
830 *sp->pptr = (*sp->pptr)->rb_left;
831 if (sp > stk) {
832 sp--;
833 BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
834 htb_next_rb_node (sp->pptr);
835 }
836 } else {
837 struct htb_class *cl;
838 cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
839 if (!cl->level)
840 return cl;
841 (++sp)->root = cl->un.inner.feed[prio].rb_node;
842 sp->pptr = cl->un.inner.ptr+prio;
843 sp->pid = cl->un.inner.last_ptr_id+prio;
844 }
845 }
846 BUG_TRAP(0);
847 return NULL;
848}
849
850/* dequeues packet at given priority and level; call only if
851 you are sure that there is active class at prio/level */
852static struct sk_buff *
853htb_dequeue_tree(struct htb_sched *q,int prio,int level)
854{
855 struct sk_buff *skb = NULL;
856 struct htb_class *cl,*start;
857 /* look initial class up in the row */
858	start = cl = htb_lookup_leaf (q->row[level]+prio,prio,
859 q->ptr[level]+prio,q->last_ptr_id[level]+prio);
860
861 do {
862next:
863 BUG_TRAP(cl);
864 if (!cl) return NULL;
865
866 /* class can be empty - it is unlikely but can be true if leaf
867 qdisc drops packets in enqueue routine or if someone used
868 graft operation on the leaf since last dequeue;
869 simply deactivate and skip such class */
870 if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
871 struct htb_class *next;
872 htb_deactivate(q,cl);
873
874 /* row/level might become empty */
875 if ((q->row_mask[level] & (1 << prio)) == 0)
876 return NULL;
877
878			next = htb_lookup_leaf (q->row[level]+prio,
879 prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
880
881 if (cl == start) /* fix start if we just deleted it */
882 start = next;
883 cl = next;
884 goto next;
885 }
886
887 if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
888 break;
889 if (!cl->warned) {
890 printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
891 cl->warned = 1;
892 }
893 q->nwc_hit++;
894 htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
895		cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio,
896 q->last_ptr_id[level]+prio);
897
898 } while (cl != start);
899
900 if (likely(skb != NULL)) {
901 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
902 cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
903 htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
904 }
905		/* this used to be after charge_class but this constellation
906 gives us slightly better performance */
907 if (!cl->un.leaf.q->q.qlen)
908 htb_deactivate (q,cl);
909 htb_charge_class (q,cl,level,skb->len);
910 }
911 return skb;
912}
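/*
 * The deficit handling above is plain deficit round robin: each active leaf
 * is visited in classid order; once the bytes sent exceed its per-level
 * deficit, the deficit is topped up by cl->un.leaf.quantum and the feed
 * pointer advances to the next leaf, so over time every leaf on the same
 * prio/level gets bandwidth proportional to its quantum.
 */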
913
914static void htb_delay_by(struct Qdisc *sch,long delay)
915{
916 struct htb_sched *q = qdisc_priv(sch);
917 if (delay <= 0) delay = 1;
918 if (unlikely(delay > 5*HZ)) {
919 if (net_ratelimit())
920 printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
921 delay = 5*HZ;
922 }
923	/* why not use jiffies here? because expires can be in the past */
924 mod_timer(&q->timer, q->jiffies + delay);
925 sch->flags |= TCQ_F_THROTTLED;
926 sch->qstats.overlimits++;
927}
928
929static struct sk_buff *htb_dequeue(struct Qdisc *sch)
930{
931 struct sk_buff *skb = NULL;
932 struct htb_sched *q = qdisc_priv(sch);
933 int level;
934 long min_delay;
935
936 q->jiffies = jiffies;
937
938 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
939 if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
940 sch->flags &= ~TCQ_F_THROTTLED;
941 sch->q.qlen--;
942 return skb;
943 }
944
945 if (!sch->q.qlen) goto fin;
946 PSCHED_GET_TIME(q->now);
947
948 min_delay = LONG_MAX;
949 q->nwc_hit = 0;
950 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
951 /* common case optimization - skip event handler quickly */
952 int m;
953 long delay;
954 if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
955 delay = htb_do_events(q,level);
956 q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
957 } else
958 delay = q->near_ev_cache[level] - q->jiffies;
959
960 if (delay && min_delay > delay)
961 min_delay = delay;
962 m = ~q->row_mask[level];
963 while (m != (int)(-1)) {
964 int prio = ffz (m);
965 m |= 1 << prio;
966 skb = htb_dequeue_tree(q,prio,level);
967 if (likely(skb != NULL)) {
968 sch->q.qlen--;
969 sch->flags &= ~TCQ_F_THROTTLED;
970 goto fin;
971 }
972 }
973 }
974 htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
975fin:
976 return skb;
977}
978
979/* try to drop from each class (by prio) until one succeeds */
980static unsigned int htb_drop(struct Qdisc* sch)
981{
982 struct htb_sched *q = qdisc_priv(sch);
983 int prio;
984
985 for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
986 struct list_head *p;
987 list_for_each (p,q->drops+prio) {
988 struct htb_class *cl = list_entry(p, struct htb_class,
989 un.leaf.drop_list);
990 unsigned int len;
991 if (cl->un.leaf.q->ops->drop &&
992 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
993 sch->q.qlen--;
994 if (!cl->un.leaf.q->q.qlen)
995 htb_deactivate (q,cl);
996 return len;
997 }
998 }
999 }
1000 return 0;
1001}
1002
1003/* reset all classes */
1004/* always called under BH & queue lock */
1005static void htb_reset(struct Qdisc* sch)
1006{
1007 struct htb_sched *q = qdisc_priv(sch);
1008 int i;
1009
1010 for (i = 0; i < HTB_HSIZE; i++) {
1011 struct list_head *p;
1012 list_for_each (p,q->hash+i) {
1013 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1014 if (cl->level)
1015 memset(&cl->un.inner,0,sizeof(cl->un.inner));
1016 else {
1017 if (cl->un.leaf.q)
1018 qdisc_reset(cl->un.leaf.q);
1019 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1020 }
1021 cl->prio_activity = 0;
1022 cl->cmode = HTB_CAN_SEND;
1023
1024 }
1025 }
1026 sch->flags &= ~TCQ_F_THROTTLED;
1027 del_timer(&q->timer);
1028 __skb_queue_purge(&q->direct_queue);
1029 sch->q.qlen = 0;
1030 memset(q->row,0,sizeof(q->row));
1031 memset(q->row_mask,0,sizeof(q->row_mask));
1032 memset(q->wait_pq,0,sizeof(q->wait_pq));
1033 memset(q->ptr,0,sizeof(q->ptr));
1034 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1035 INIT_LIST_HEAD(q->drops+i);
1036}
1037
1038static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1039{
1040 struct htb_sched *q = qdisc_priv(sch);
1041 struct rtattr *tb[TCA_HTB_INIT];
1042 struct tc_htb_glob *gopt;
1043 int i;
1044 if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
1045 tb[TCA_HTB_INIT-1] == NULL ||
1046 RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
1047 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1048 return -EINVAL;
1049 }
1050 gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
1051 if (gopt->version != HTB_VER >> 16) {
1052 printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1053 HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
1054 return -EINVAL;
1055 }
1056
1057 INIT_LIST_HEAD(&q->root);
1058 for (i = 0; i < HTB_HSIZE; i++)
1059 INIT_LIST_HEAD(q->hash+i);
1060 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1061 INIT_LIST_HEAD(q->drops+i);
1062
1063 init_timer(&q->timer);
1064 skb_queue_head_init(&q->direct_queue);
1065
1066 q->direct_qlen = sch->dev->tx_queue_len;
1067 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1068 q->direct_qlen = 2;
1069 q->timer.function = htb_timer;
1070 q->timer.data = (unsigned long)sch;
1071
1072#ifdef HTB_RATECM
1073 init_timer(&q->rttim);
1074 q->rttim.function = htb_rate_timer;
1075 q->rttim.data = (unsigned long)sch;
1076 q->rttim.expires = jiffies + HZ;
1077 add_timer(&q->rttim);
1078#endif
1079 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1080 q->rate2quantum = 1;
1081 q->defcls = gopt->defcls;
1082
1083 return 0;
1084}
1085
1086static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1087{
1088 struct htb_sched *q = qdisc_priv(sch);
1089 unsigned char *b = skb->tail;
1090 struct rtattr *rta;
1091 struct tc_htb_glob gopt;
1092 HTB_QLOCK(sch);
1093 gopt.direct_pkts = q->direct_pkts;
1094
1095 gopt.version = HTB_VER;
1096 gopt.rate2quantum = q->rate2quantum;
1097 gopt.defcls = q->defcls;
1098	gopt.debug = 0;
1099 rta = (struct rtattr*)b;
1100 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1101 RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1102 rta->rta_len = skb->tail - b;
1103 HTB_QUNLOCK(sch);
1104 return skb->len;
1105rtattr_failure:
1106 HTB_QUNLOCK(sch);
1107 skb_trim(skb, skb->tail - skb->data);
1108 return -1;
1109}
1110
1111static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1112 struct sk_buff *skb, struct tcmsg *tcm)
1113{
1114 struct htb_class *cl = (struct htb_class*)arg;
1115 unsigned char *b = skb->tail;
1116 struct rtattr *rta;
1117 struct tc_htb_opt opt;
1118
1119 HTB_QLOCK(sch);
1120 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1121 tcm->tcm_handle = cl->classid;
1122 if (!cl->level && cl->un.leaf.q)
1123 tcm->tcm_info = cl->un.leaf.q->handle;
1124
1125 rta = (struct rtattr*)b;
1126 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1127
1128 memset (&opt,0,sizeof(opt));
1129
1130 opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
1131 opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
1132 opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
1133 opt.level = cl->level;
1134 RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1135 rta->rta_len = skb->tail - b;
1136 HTB_QUNLOCK(sch);
1137 return skb->len;
1138rtattr_failure:
1139 HTB_QUNLOCK(sch);
1140 skb_trim(skb, b - skb->data);
1141 return -1;
1142}
1143
1144static int
1145htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1146 struct gnet_dump *d)
1147{
1148 struct htb_class *cl = (struct htb_class*)arg;
1149
1150#ifdef HTB_RATECM
1151 cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
1152 cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
1153#endif
1154
1155 if (!cl->level && cl->un.leaf.q)
1156 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1157 cl->xstats.tokens = cl->tokens;
1158 cl->xstats.ctokens = cl->ctokens;
1159
1160 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1161 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1162 gnet_stats_copy_queue(d, &cl->qstats) < 0)
1163 return -1;
1164
1165 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1166}
1167
1168static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1169 struct Qdisc **old)
1170{
1171 struct htb_class *cl = (struct htb_class*)arg;
1172
1173 if (cl && !cl->level) {
1174 if (new == NULL && (new = qdisc_create_dflt(sch->dev,
1175 &pfifo_qdisc_ops)) == NULL)
1176 return -ENOBUFS;
1177 sch_tree_lock(sch);
1178 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1179 if (cl->prio_activity)
1180 htb_deactivate (qdisc_priv(sch),cl);
1181
1182			/* TODO: is it correct ? Why doesn't CBQ do it ? */
1183 sch->q.qlen -= (*old)->q.qlen;
1184 qdisc_reset(*old);
1185 }
1186 sch_tree_unlock(sch);
1187 return 0;
1188 }
1189 return -ENOENT;
1190}
1191
1192static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1193{
1194 struct htb_class *cl = (struct htb_class*)arg;
1195 return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1196}
1197
1198static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1199{
1200	struct htb_class *cl = htb_find(classid,sch);
1201 if (cl)
1202 cl->refcnt++;
1203 return (unsigned long)cl;
1204}
1205
1206static void htb_destroy_filters(struct tcf_proto **fl)
1207{
1208 struct tcf_proto *tp;
1209
1210 while ((tp = *fl) != NULL) {
1211 *fl = tp->next;
1212 tcf_destroy(tp);
1213 }
1214}
1215
1216static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1217{
1218 struct htb_sched *q = qdisc_priv(sch);
1219 if (!cl->level) {
1220 BUG_TRAP(cl->un.leaf.q);
1221 sch->q.qlen -= cl->un.leaf.q->q.qlen;
1222 qdisc_destroy(cl->un.leaf.q);
1223 }
1224 qdisc_put_rtab(cl->rate);
1225 qdisc_put_rtab(cl->ceil);
1226
1227 htb_destroy_filters (&cl->filter_list);
1228
1229 while (!list_empty(&cl->children))
1230 htb_destroy_class (sch,list_entry(cl->children.next,
1231 struct htb_class,sibling));
1232
1233 /* note: this delete may happen twice (see htb_delete) */
1234 list_del(&cl->hlist);
1235 list_del(&cl->sibling);
1236
1237 if (cl->prio_activity)
1238 htb_deactivate (q,cl);
1239
1240 if (cl->cmode != HTB_CAN_SEND)
1241		rb_erase(&cl->pq_node,q->wait_pq+cl->level);
1242
1243 kfree(cl);
1244}
1245
1246/* always called under BH & queue lock */
1247static void htb_destroy(struct Qdisc* sch)
1248{
1249 struct htb_sched *q = qdisc_priv(sch);
1250
1251 del_timer_sync (&q->timer);
1252#ifdef HTB_RATECM
1253 del_timer_sync (&q->rttim);
1254#endif
1255 /* This line used to be after htb_destroy_class call below
1256 and surprisingly it worked in 2.4. But it must precede it
1257	   because filters need their target class alive to be able to call
1258	   unbind_filter on it (without an Oops). */
1259 htb_destroy_filters(&q->filter_list);
1260
1261 while (!list_empty(&q->root))
1262 htb_destroy_class (sch,list_entry(q->root.next,
1263 struct htb_class,sibling));
1264
1265 __skb_queue_purge(&q->direct_queue);
1266}
1267
1268static int htb_delete(struct Qdisc *sch, unsigned long arg)
1269{
1270 struct htb_sched *q = qdisc_priv(sch);
1271 struct htb_class *cl = (struct htb_class*)arg;
1272
1273	// TODO: why not allow deleting a subtree ? references ? does the
1274	// tc subsys guarantee us that in htb_destroy it holds no class
1275	// refs so that we can remove children safely there ?
1276 if (!list_empty(&cl->children) || cl->filter_cnt)
1277 return -EBUSY;
1278
1279 sch_tree_lock(sch);
1280
1281 /* delete from hash and active; remainder in destroy_class */
1282 list_del_init(&cl->hlist);
1283 if (cl->prio_activity)
1284 htb_deactivate (q,cl);
1285
1286 if (--cl->refcnt == 0)
1287 htb_destroy_class(sch,cl);
1288
1289 sch_tree_unlock(sch);
1290 return 0;
1291}
1292
1293static void htb_put(struct Qdisc *sch, unsigned long arg)
1294{
1295	struct htb_class *cl = (struct htb_class*)arg;
1296
1297 if (--cl->refcnt == 0)
1298 htb_destroy_class(sch,cl);
1299}
1300
1301static int htb_change_class(struct Qdisc *sch, u32 classid,
1302 u32 parentid, struct rtattr **tca, unsigned long *arg)
1303{
1304 int err = -EINVAL;
1305 struct htb_sched *q = qdisc_priv(sch);
1306 struct htb_class *cl = (struct htb_class*)*arg,*parent;
1307 struct rtattr *opt = tca[TCA_OPTIONS-1];
1308 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1309 struct rtattr *tb[TCA_HTB_RTAB];
1310 struct tc_htb_opt *hopt;
1311
1312 /* extract all subattrs from opt attr */
1313 if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1314 tb[TCA_HTB_PARMS-1] == NULL ||
1315 RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1316 goto failure;
1317
1318 parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1319
1320 hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1321
1322 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1323 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1324 if (!rtab || !ctab) goto failure;
1325
1326 if (!cl) { /* new class */
1327 struct Qdisc *new_q;
1328 /* check for valid classid */
1329 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1330 goto failure;
1331
1332 /* check maximal depth */
1333 if (parent && parent->parent && parent->parent->level < 2) {
1334 printk(KERN_ERR "htb: tree is too deep\n");
1335 goto failure;
1336 }
1337 err = -ENOBUFS;
1338		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1339 goto failure;
1340
1341 cl->refcnt = 1;
1342 INIT_LIST_HEAD(&cl->sibling);
1343 INIT_LIST_HEAD(&cl->hlist);
1344 INIT_LIST_HEAD(&cl->children);
1345 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1346
1347 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1348		   so it can't be used inside of sch_tree_lock
1349 -- thanks to Karlis Peisenieks */
1350 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1351 sch_tree_lock(sch);
1352 if (parent && !parent->level) {
1353 /* turn parent into inner node */
1354 sch->q.qlen -= parent->un.leaf.q->q.qlen;
1355 qdisc_destroy (parent->un.leaf.q);
1356 if (parent->prio_activity)
1357 htb_deactivate (q,parent);
1358
1359 /* remove from evt list because of level change */
1360 if (parent->cmode != HTB_CAN_SEND) {
1361				rb_erase(&parent->pq_node,q->wait_pq);
1362 parent->cmode = HTB_CAN_SEND;
1363 }
1364 parent->level = (parent->parent ? parent->parent->level
1365 : TC_HTB_MAXDEPTH) - 1;
1366 memset (&parent->un.inner,0,sizeof(parent->un.inner));
1367 }
1368 /* leaf (we) needs elementary qdisc */
1369 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1370
1371 cl->classid = classid; cl->parent = parent;
1372
1373 /* set class to be in HTB_CAN_SEND state */
1374 cl->tokens = hopt->buffer;
1375 cl->ctokens = hopt->cbuffer;
1376		cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
1377 PSCHED_GET_TIME(cl->t_c);
1378 cl->cmode = HTB_CAN_SEND;
1379
1380 /* attach to the hash list and parent's family */
1381 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1382 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
1383 } else sch_tree_lock(sch);
1384
1385	/* there used to be a nasty bug here; we have to check that the node
1386	   is really a leaf before changing cl->un.leaf ! */
1387 if (!cl->level) {
1388 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1389 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1390 printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
1391 cl->un.leaf.quantum = 1000;
1392 }
1393 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1394 printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
1395 cl->un.leaf.quantum = 200000;
1396 }
1397 if (hopt->quantum)
1398 cl->un.leaf.quantum = hopt->quantum;
1399 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1400 cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1401 }
1402
1403 cl->buffer = hopt->buffer;
1404 cl->cbuffer = hopt->cbuffer;
1405 if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
1406 if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
1407 sch_tree_unlock(sch);
1408
1409 *arg = (unsigned long)cl;
1410 return 0;
1411
1412failure:
1413 if (rtab) qdisc_put_rtab(rtab);
1414 if (ctab) qdisc_put_rtab(ctab);
1415 return err;
1416}
1417
1418static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1419{
1420 struct htb_sched *q = qdisc_priv(sch);
1421 struct htb_class *cl = (struct htb_class *)arg;
1422 struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1423
1424 return fl;
1425}
1426
1427static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1428 u32 classid)
1429{
1430 struct htb_sched *q = qdisc_priv(sch);
1431 struct htb_class *cl = htb_find (classid,sch);
1432
1433 /*if (cl && !cl->level) return 0;
1434 The line above used to be there to prevent attaching filters to
1435 leaves. But at least tc_index filter uses this just to get class
1436 for other reasons so that we have to allow for it.
1437 ----
1438 19.6.2002 As Werner explained it is ok - bind filter is just
1439 another way to "lock" the class - unlike "get" this lock can
1440 be broken by class during destroy IIUC.
1441 */
1442 if (cl)
1443 cl->filter_cnt++;
1444 else
1445 q->filter_cnt++;
1446 return (unsigned long)cl;
1447}
1448
1449static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1450{
1451 struct htb_sched *q = qdisc_priv(sch);
1452 struct htb_class *cl = (struct htb_class *)arg;
1453
1454 if (cl)
1455 cl->filter_cnt--;
1456 else
1457 q->filter_cnt--;
1458}
1459
1460static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1461{
1462 struct htb_sched *q = qdisc_priv(sch);
1463 int i;
1464
1465 if (arg->stop)
1466 return;
1467
1468 for (i = 0; i < HTB_HSIZE; i++) {
1469 struct list_head *p;
1470 list_for_each (p,q->hash+i) {
1471 struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1472 if (arg->count < arg->skip) {
1473 arg->count++;
1474 continue;
1475 }
1476 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1477 arg->stop = 1;
1478 return;
1479 }
1480 arg->count++;
1481 }
1482 }
1483}
1484
1485static struct Qdisc_class_ops htb_class_ops = {
1486 .graft = htb_graft,
1487 .leaf = htb_leaf,
1488 .get = htb_get,
1489 .put = htb_put,
1490 .change = htb_change_class,
1491 .delete = htb_delete,
1492 .walk = htb_walk,
1493 .tcf_chain = htb_find_tcf,
1494 .bind_tcf = htb_bind_filter,
1495 .unbind_tcf = htb_unbind_filter,
1496 .dump = htb_dump_class,
1497 .dump_stats = htb_dump_class_stats,
1498};
1499
1500static struct Qdisc_ops htb_qdisc_ops = {
1501 .next = NULL,
1502 .cl_ops = &htb_class_ops,
1503 .id = "htb",
1504 .priv_size = sizeof(struct htb_sched),
1505 .enqueue = htb_enqueue,
1506 .dequeue = htb_dequeue,
1507 .requeue = htb_requeue,
1508 .drop = htb_drop,
1509 .init = htb_init,
1510 .reset = htb_reset,
1511 .destroy = htb_destroy,
1512 .change = NULL /* htb_change */,
1513 .dump = htb_dump,
1514 .owner = THIS_MODULE,
1515};
1516
1517static int __init htb_module_init(void)
1518{
1519 return register_qdisc(&htb_qdisc_ops);
1520}
1521static void __exit htb_module_exit(void)
1522{
1523 unregister_qdisc(&htb_qdisc_ops);
1524}
1525module_init(htb_module_init)
1526module_exit(htb_module_exit)
1527MODULE_LICENSE("GPL");