/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
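
	(An illustrative instance, with numbers not taken from the text
	above: for R = 125000 bytes/sec and B = 10000 bytes, a burst of
	12500 bytes conforms only if it is spread over at least
	(12500 - 10000)/125000 = 0.02 sec.)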

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
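
	(A worked trace under assumed values B/R = 0.1 sec and
	S/R = 0.04 sec: a packet arriving while N = 0.05 is sent at once
	and N drops to 0.01; an identical packet right behind it must
	wait until N climbs back to 0.04, i.e. for 0.03 sec.)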

	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
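
	(Hypothetical figures: with L = 110 Kbytes queued, B = 10 Kbytes,
	R = 1 Mbyte/sec, M = 1.5 Kbytes and P = 2 Mbytes/sec,
	lat = max(100 msec, ~54 msec) = 100 msec.)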

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

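/* L2T ("length to time") converts a packet length into the time needed
 * to transmit it at the table's configured rate; qdisc_l2t() looks the
 * length up in the 256-slot rate table (indexed, roughly, by
 * len >> cell_log).
 */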
#define L2T(q, L)	qdisc_l2t((q)->R_tab, L)
#define L2T_P(q, L)	qdisc_l2t((q)->P_tab, L)

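/* A packet longer than max_size can never accumulate enough tokens to
 * be sent in one piece, so it is rejected up front via
 * qdisc_reshape_fail() (which normally drops it).
 */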
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	qdisc_bstats_update(sch, skb);
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

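/* Dequeue: peek at the head packet, work out how many tokens have
 * accrued since the last checkpoint t_c (capped at buffer, and at mtu
 * for the peak bucket), then charge the packet's transmission time
 * against both buckets. (toks|ptoks) >= 0 tests both signs at once:
 * the packet is released only if neither bucket goes negative;
 * otherwise the watchdog is armed for the moment the deficit clears.
 */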
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

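/* For orientation, a typical userspace command that ends up in
 * tbf_change() (an illustrative iproute2 invocation, not taken from
 * this file):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * iproute2 translates rate/burst/latency into tc_tbf_qopt plus the
 * rate tables validated by tbf_policy above.
 */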
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

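	/* Derive max_size: find the first of the 256 rate-table slots
	 * whose transmission cost exceeds the bucket depth (buffer for
	 * the rate table, mtu for the peak table). Packets at or beyond
	 * that slot could never be sent whole, so tbf_enqueue() rejects
	 * them early.
	 */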
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
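
	/* swap() rather than assignment: after the exchange rtab/ptab
	 * refer to the old tables, which the common exit path below
	 * releases with qdisc_put_rtab() once the lock is dropped.
	 */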
	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

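/* TBF is classful only in a degenerate sense: it exposes exactly one
 * class (minor 1) wrapping the inner qdisc, so get/put are no-ops and
 * walk visits a single pseudo-class.
 */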
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");