/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i + ... + s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

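	A minimal sketch of this update rule (plain C, with names
	invented here purely for illustration; the real code below
	tracks tokens in scaled psched ticks, not seconds):

	    double n = B / R;                  // bucket starts full

	    void advance(double delta)         // delta = idle time elapsed
	    {
	            n += delta;
	            if (n > B / R)
	                    n = B / R;         // clamp at depth B/R
	    }

	    int try_send(double s)             // s = packet size in bytes
	    {
	            if (s / R > n)
	                    return 0;          // throttle; arm a timer
	            n -= s / R;                // the jump at t_*
	            return 1;
	    }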

	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

	where L is the maximal backlog (the "limit" parameter) in bytes.

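	For instance (illustrative figures): with L = 10000 bytes,
	B = 2000 bytes, R = 1000 bytes/sec, M = 1500 bytes and
	P = 4000 bytes/sec:

	lat = max((10000-2000)/1000, (10000-1500)/4000)
	    = max(8, 2.125) = 8 sec.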

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is
	~10Kbytes: the bucket must hold at least one timer tick's worth
	of data, i.e. B >= R/HZ.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

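/* L2T maps a packet length L to its transmission time in psched ticks
 * via the rate table supplied by userspace tc (roughly, the table is
 * indexed by length >> cell_log).
 */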
#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)

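/* Enqueue to the inner qdisc. A packet longer than max_size could never
 * accumulate enough tokens to be transmitted, so it is rejected up front.
 */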
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != 0) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	return 0;
}

static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

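/* Peek at the head of the inner qdisc and compute how many tokens the
 * time elapsed since t_c has earned (bounded by the bucket depth). The
 * packet is actually dequeued only if both the rate bucket (toks) and,
 * when a peak rate is configured, the peak bucket (ptoks) stay
 * non-negative; otherwise the watchdog is armed for the instant the
 * larger deficit clears.
 */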
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}


static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

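/* Parse the TCA_TBF_* netlink attributes and (re)configure the qdisc;
 * reached both at creation (via tbf_init) and on a later change
 * request. On success, the previous rate tables are swapped out with
 * xchg() and released through the done: path.
 */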
static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

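	/* Find the largest packet size whose transmission time still
	 * fits into the configured buffer (and, with a peak rate, into
	 * mtu): scan the 256-slot rate table for the first slot that
	 * exceeds the budget and convert the slot index back to bytes.
	 */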
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

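/* TBF is classful only so that an inner qdisc can be attached and
 * inspected: it exposes exactly one pseudo-class (minor 1) that maps
 * to q->qdisc, hence the fixed handles and -ENOSYS stubs below.
 */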
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");