/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */
14
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <asm/uaccess.h>
18 #include <asm/system.h>
19 #include <linux/bitops.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/string.h>
24 #include <linux/mm.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
27 #include <linux/in.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/if_ether.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/notifier.h>
35 #include <net/ip.h>
36 #include <net/route.h>
37 #include <linux/skbuff.h>
38 #include <net/sock.h>
39 #include <net/pkt_sched.h>
40
41
/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

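	A worked example (illustrative numbers, not from the source):
	let R = 1000 bytes/sec and B = 2000 bytes, so N starts at
	B/R = 2 sec. A 1500-byte packet needs S/R = 1.5 sec of credit
	and is sent at once, leaving N = 0.5 sec. A second 1500-byte
	packet must wait until N grows back to 1.5 sec, i.e. one more
	second - exactly the time needed to earn the missing 1000
	bytes at rate R.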


	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

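	For instance (illustrative numbers): with queue limit L = 10000
	bytes, B = 4000 bytes, R = 1000 bytes/sec, M = 1500 bytes and
	P = 2000 bytes/sec, the worst-case latency is
	max((10000-4000)/1000, (10000-1500)/2000) = max(6, 4.25) = 6 sec.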

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc; if the inner qdisc is
	changed, the limit is no longer effective.
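
	An example of configuring TBF from userspace (an assumed
	invocation, cf. tc-tbf(8); the parameter values are illustrative,
	not mandated by this file):

	    tc qdisc add dev eth0 root tbf rate 0.5mbit \
	        burst 5kb latency 70ms peakrate 1mbit minburst 1540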
*/

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long	tokens;			/* Current number of B tokens */
	long	ptokens;		/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct timer_list wd_timer;	/* Watchdog timer */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
};

#define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])
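
/* A note on the L2T()/L2T_P() lookups above (illustrative numbers):
 * the rate table supplied with the qdisc parameters maps a packet
 * length, in cells of 2^cell_log bytes, to the time needed to send
 * that length at the configured rate. With cell_log = 3, a 1500-byte
 * packet indexes data[1500 >> 3] = data[187], which holds the
 * transmission time for that size in scheduler ticks.
 */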

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (skb->len > q->max_size) {
		sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static void tbf_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

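/*
 * In tbf_dequeue() below, "toks" is the idle time accumulated since
 * the last checkpoint q->t_c (capped at q->buffer) minus the time
 * needed to send this packet at rate R; with a peak rate configured,
 * "ptoks" is the same quantity against P, capped at q->mtu. If both
 * are non-negative the packet is released immediately; otherwise the
 * watchdog is armed for the larger of the two deficits and the packet
 * is put back into the inner qdisc.
 */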
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks, delay;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

		if (delay == 0)
			delay = 1;

		mod_timer(&q->wd_timer, jiffies+delay);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds tempting, but it
		   is wrong in principle: we MUST NOT reorder packets
		   under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			sch->q.qlen--;
			sch->qstats.drops++;
		}

		sch->flags |= TCQ_F_THROTTLED;
		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->wd_timer);
}

static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}

static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

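	/* Find the largest packet that can ever be sent: scan the rate
	 * table for the first length whose transmission time exceeds the
	 * bucket (and, with a peak rate, the mtu budget) and take one
	 * byte less. E.g. (illustrative numbers) if cell_log = 3 and
	 * rtab->data[n] first exceeds qopt->buffer at n = 188, then
	 * max_size = (188 << 3) - 1 = 1503 bytes. */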
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size) max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	PSCHED_GET_TIME(q->t_c);
	init_timer(&q->wd_timer);
	q->wd_timer.function = tbf_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

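/*
 * TBF exposes exactly one pseudo-class (class id 1, the inner qdisc),
 * so the walker below visits a single element; classes cannot be
 * added or deleted, hence the -ENOSYS stubs above.
 */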
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");