[NET_SCHED]: Fix endless loops caused by inaccurate qlen counters (part 1)
net/sched/sch_tbf.c (deliverable/linux.git)
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *		original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

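	A worked example (illustrative numbers, not part of the original
	text): let R = 125000 byte/s and B = 10000 bytes, so N(t)
	saturates at B/R = 80 ms. After a long idle period, a 1500-byte
	packet needs S/R = 12 ms of credit and leaves N at 68 ms; about
	six such packets can go back-to-back before N reaches zero and
	the flow is throttled down to the steady rate R.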


	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

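	A worked instance of this estimate (hypothetical figures): with a
	backlog of L = 60000 bytes, B = 10000 bytes, R = 125000 byte/s
	and no peak-rate bucket, lat = (60000 - 10000)/125000 = 0.4 sec.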

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is
	~10Kbytes: 10Mbit/s is 1.25Mbyte/s, and 1.25Mbyte/s divided by
	HZ=100 is 12.5Kbytes per tick.

	Note that the peak rate TBF is much stricter: with MTU 1500,
	P_crit = MTU*HZ = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc; if the inner qdisc is
	changed, the limit is no longer effective.
*/

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;		/* Peak-rate bucket depth, in the same time units as buffer */
	u32		max_size;	/* Largest packet accepted: bytes */
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct timer_list wd_timer;	/* Watchdog timer */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
};

#define L2T(q,L)	((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
#define L2T_P(q,L)	((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log])
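/*
 * These macros convert a packet length into its transmission time in
 * scheduler ticks: the length is bucketed by cell_log and used to index
 * a 256-slot rate table precomputed by userspace for the configured
 * rate. Illustrative example (numbers not from this file): with
 * cell_log = 3, a 100-byte packet indexes slot 100 >> 3 = 12, whose
 * entry holds the precomputed transmission time for that size class.
 */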

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (skb->len > q->max_size) {
		sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static void tbf_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks, delay;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

		/* Tokens earned since the last check-point, capped at
		   the bucket size. */
		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		/* Both buckets non-negative: the packet conforms and
		   may be sent now. */
		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

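		/* At least one bucket is in deficit here: -toks and
		   -ptoks are the (positive) shortfalls in psched ticks,
		   so wait for the larger one, clamped below by one
		   jiffy so the watchdog always fires in the future. */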
		delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

		if (delay == 0)
			delay = 1;

		mod_timer(&q->wd_timer, jiffies+delay);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC). */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails the skb is dropped, so
			   account for it here as well. */
			sch->q.qlen--;
			sch->qstats.drops++;
		}
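		/* Keeping sch->q.qlen in step with the inner queue is
		   essential: a stale non-zero qlen would make the
		   device scheduler keep polling this qdisc for packets
		   that can never appear, the endless-loop scenario the
		   qlen-counter fix in this commit is concerned with. */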

		sch->flags |= TCQ_F_THROTTLED;
		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->wd_timer);
}

static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *q;
	struct rtattr *rta;
	int ret;

	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
			      TC_H_MAKE(sch->handle, 1));
	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}

static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

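	/* Derive the largest packet size that a full bucket can ever
	 * cover: scan the 256 rate-table slots for the first whose
	 * cost exceeds the configured buffer (and, when a peak rate is
	 * set, the peak bucket "mtu"), then convert the slot index
	 * back to a byte count. Illustrative example (hypothetical
	 * numbers): if slot 13 is the first one costing more than the
	 * buffer and cell_log is 3, max_size = (13 << 3) - 1 = 103.
	 */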
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	PSCHED_GET_TIME(q->t_c);
	init_timer(&q->wd_timer);
	q->wd_timer.function = tbf_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};
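
/*
 * A typical userspace setup for this qdisc (illustrative; the exact
 * option spelling depends on the iproute2 version):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb limit 60kb
 *
 * "rate" sets R, "burst" sets the bucket depth B, and "limit" sizes the
 * inner bfifo created by tbf_create_dflt_qdisc().
 */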

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");