/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *		original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
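
	For example (illustrative numbers only): with backlog L = 10 kbytes,
	burst B = 2 kbytes, rate R = 1 Mbyte/s, MTU M = 1.5 kbytes and peak
	rate P = 10 Mbyte/s, the two terms are 8 ms and 0.85 ms, so the
	estimated latency is roughly 8 ms.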


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in a given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
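
/* Rough worked example (illustrative numbers, not taken from the code
 * above): with rate_bytes_ps = 12500000 (100 Mbit/s) and
 * time_in_ns = 1000000 (1 ms), len comes out to 12500 bytes before the
 * ATM cell and per-packet overhead adjustments.
 */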

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

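/* Enqueue entry point: packets larger than max_size are segmented if they
 * are GSO (so each segment fits the bucket) or rejected via
 * qdisc_reshape_fail(); everything else is queued on the inner qdisc and
 * accounted in this qdisc's backlog and queue length.
 */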
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

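/* Ask the inner qdisc to drop one packet (if it implements ->drop) and
 * update this qdisc's backlog, queue length and drop statistics.
 */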
static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->qstats.backlog -= len;
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
	}
	return len;
}

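/* A peak rate of zero means no second (peak) bucket is configured. */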
static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

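/* Dequeue: peek at the head packet, grow the token counters by the time
 * elapsed since t_c (capped at buffer/mtu) and charge the packet against
 * both buckets.  The packet is released only if neither counter goes
 * negative; otherwise the watchdog is armed for the moment the deficit
 * would be repaid and the packet stays queued.
 */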
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks),
					   true);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

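/* Reset to the initial state: flush the inner qdisc, refill both buckets
 * and cancel any pending watchdog.
 */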
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

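/* Netlink attribute policy.  TCA_TBF_RATE64/TCA_TBF_PRATE64 carry rates
 * that do not fit in the legacy 32-bit tc_ratespec fields.
 */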
static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

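/* Parse the netlink options and (re)configure the qdisc.  The burst sizes
 * may be given directly in bytes (TCA_TBF_BURST/TCA_TBF_PBURST) or derived
 * from the legacy time-based buffer/mtu values; the inner qdisc defaults to
 * a bfifo sized to 'limit'.
 */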
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

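/* init: start the watchdog, attach a noop inner qdisc and hand the netlink
 * options to tbf_change().
 */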
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

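/* Tear down: stop the watchdog and destroy the inner qdisc. */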
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

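/* Report the current configuration (rates, burst, limit) back to user space
 * over netlink, using the 64-bit rate attributes when a rate does not fit
 * in 32 bits.
 */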
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

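/* TBF is classful only in a degenerate sense: it exposes a single class
 * (minor 1) whose leaf is the inner qdisc.  The operations below implement
 * that one-class interface.
 */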
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");