/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *		original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/* Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size; the other,
	with rate P (the peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
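
/* A worked instance of the two formulas above, with illustrative numbers
 * that are assumptions, not from the original text: let R = 125000
 * bytes/sec (1Mbit), B = 10000 bytes, P = 250000 bytes/sec, M = 1500
 * bytes, and a backlog limit L = 50000 bytes.  The reshaping latency
 * estimate is then
 *
 *	lat = max((L-B)/R, (L-M)/P)
 *	    = max(40000/125000, 48500/250000)
 *	    = max(0.32 sec, 0.194 sec) = 0.32 sec
 *
 * and, conversely, sustaining R with HZ=100 needs a depth of at least
 * B >= R/HZ = 1250 bytes, which B = 10000 satisfies comfortably.
 */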

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

#define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])

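/* Both macros above translate a packet length L into transmission time by
 * indexing the 256-slot rate table supplied from userspace: the length is
 * quantized into a cell by the right shift, then the precomputed per-cell
 * time is read out.  For illustration (assumed numbers, not from the
 * original): with cell_log = 3, a 1000-byte packet falls into cell
 * 1000 >> 3 = 125, so L2T(q, 1000) returns q->R_tab->data[125].
 */
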
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (skb->len > q->max_size) {
		sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

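/* tbf_dequeue() below is the packetized token algorithm from the header
 * comment, with N(t) scaled to scheduler ticks: "toks" accumulates the
 * time elapsed since the last check-point t_c (clamped to the bucket
 * depth), the L2T()/L2T_P() cost of the head packet is subtracted, and
 * the packet may leave only if both counters stay non-negative - which
 * the (toks|ptoks) >= 0 test checks in one go, since the bitwise OR of
 * two longs is negative iff at least one sign bit is set.
 */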
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
		}

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

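/* The helper below builds the default inner bfifo qdisc: because a
 * qdisc's ->change() expects a netlink-style attribute, the byte limit is
 * wrapped into a temporary kmalloc'ed rtattr carrying a struct
 * tc_fifo_qopt before being handed to the freshly created child.
 */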
static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *q;
	struct rtattr *rta;
	int ret;

	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
			      TC_H_MAKE(sch->handle, 1));
	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}

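/* In tbf_change() below, max_size is derived by scanning the rate table:
 * the first cell n whose transmission time exceeds the bucket depth marks
 * the largest packet that could ever gather enough tokens, so
 * max_size = (n << cell_log) - 1, and the smaller of the two bounds wins
 * when a peak-rate table is also present.  A sketch with assumed numbers:
 * if cell_log = 3 and cell 125 is the first one costing more than
 * qopt->buffer ticks, packets of up to (125 << 3) - 1 = 999 bytes pass.
 */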
static int tbf_change(struct Qdisc *sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log)-1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log)-1;
		if (size < max_size) max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	PSCHED_GET_TIME(q->t_c);
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

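/* tbf_dump() below uses the classic rtattr back-patching idiom: the
 * nested TCA_OPTIONS attribute is opened with a zero-length RTA_PUT, the
 * TBF parameters are appended, and only then is rta_len fixed up from the
 * distance the tail pointer has moved.
 */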
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		= tbf_graft,
	.leaf		= tbf_leaf,
	.get		= tbf_get,
	.put		= tbf_put,
	.change		= tbf_change_class,
	.delete		= tbf_delete,
	.walk		= tbf_walk,
	.tcf_chain	= tbf_find_tcf,
	.dump		= tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops = {
	.next		= NULL,
	.cl_ops		= &tbf_class_ops,
	.id		= "tbf",
	.priv_size	= sizeof(struct tbf_sched_data),
	.enqueue	= tbf_enqueue,
	.dequeue	= tbf_dequeue,
	.requeue	= tbf_requeue,
	.drop		= tbf_drop,
	.init		= tbf_init,
	.reset		= tbf_reset,
	.destroy	= tbf_destroy,
	.change		= tbf_change,
	.dump		= tbf_dump,
	.owner		= THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");