/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/* Parameters, settable by user:
   -----------------------------

   limit - bytes (must be > qth_max + burst)

   Hard limit on queue length, should be chosen > qth_max
   to allow packet bursts. This parameter does not
   affect the algorithm's behaviour and can be chosen
   arbitrarily high (well, less than RAM size).
   Really, this limit will never be reached
   if RED works correctly.
 */

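/*
 * Illustrative userspace configuration (a sketch with example values,
 * not recommendations):
 *
 *   tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *           avpkt 1000 burst 55 ecn bandwidth 10Mbit
 *
 * From these, tc derives Wlog, Plog, Scell_log and the STAB lookup
 * table, and passes them down in the TCA_RED_PARMS and TCA_RED_STAB
 * attributes handled in red_change() below.
 */
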
struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
        struct red_parms        parms;
        struct red_stats        stats;
        struct Qdisc            *qdisc;
};

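/* Flag tests: TC_RED_ECN allows ECN marking instead of dropping for
 * eligible packets; TC_RED_HARDDROP forces a drop, even of ECN-capable
 * packets, once the hard-mark threshold is crossed.
 */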
static inline int red_use_ecn(struct red_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
        return q->flags & TC_RED_HARDDROP;
}

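/*
 * Enqueue: update the average queue size estimate from the child's
 * backlog, then let the RED state machine decide whether to pass the
 * packet through, ECN-mark it, or drop it (probabilistically between
 * qth_min and qth_max, unconditionally above qth_max).
 */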
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
        int ret;

        q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        switch (red_action(&q->parms, q->parms.qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                sch->qstats.overlimits++;
                if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                }

                q->stats.prob_mark++;
                break;

        case RED_HARD_MARK:
                sch->qstats.overlimits++;
                if (red_use_harddrop(q) || !red_use_ecn(q) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }

                q->stats.forced_mark++;
                break;
        }

        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
                qdisc_bstats_update(sch, skb);
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
                sch->qstats.drops++;
        }
        return ret;

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}

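/*
 * Dequeue from the child; when the child runs empty, note the start of
 * an idle period so the average queue size keeps decaying while the
 * link carries no traffic.
 */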
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;

        skb = child->dequeue(child);
        if (skb)
                sch->q.qlen--;
        else if (!red_is_idling(&q->parms))
                red_start_of_idle_period(&q->parms);

        return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;

        return child->ops->peek(child);
}

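/*
 * ->drop: ask the child to shed one packet; such drops are accounted
 * as "other" (neither early nor tail drops) in the RED statistics.
 */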
static unsigned int red_drop(struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
        unsigned int len;

        if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
                q->stats.other++;
                sch->qstats.drops++;
                sch->q.qlen--;
                return len;
        }

        if (!red_is_idling(&q->parms))
                red_start_of_idle_period(&q->parms);

        return 0;
}

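/* Reset the child queue and restart the RED average-queue estimator. */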
static void red_reset(struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);

        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
        red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);

        qdisc_destroy(q->qdisc);
}

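/*
 * Netlink attribute layout: TCA_RED_PARMS carries struct tc_red_qopt
 * (thresholds, limit, log constants, flags); TCA_RED_STAB carries the
 * lookup table of RED_STAB_SIZE bytes precomputed by userspace tc.
 */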
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
};

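/*
 * Parse and apply a new configuration. If a byte limit is given, a
 * bfifo of that size is created as the new child queue; the child swap
 * and the parameter update both happen under the qdisc tree lock.
 */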
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_RED_MAX + 1];
        struct tc_red_qopt *ctl;
        struct Qdisc *child = NULL;
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
        if (err < 0)
                return err;

        if (tb[TCA_RED_PARMS] == NULL ||
            tb[TCA_RED_STAB] == NULL)
                return -EINVAL;

        ctl = nla_data(tb[TCA_RED_PARMS]);

        if (ctl->limit > 0) {
                child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }

        sch_tree_lock(sch);
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
                qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      nla_data(tb[TCA_RED_STAB]));

        if (skb_queue_empty(&sch->q))
                red_end_of_idle_period(&q->parms);

        sch_tree_unlock(sch);
        return 0;
}

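/* Initial setup: start with the noop qdisc as child, then treat the
 * supplied options as an ordinary change request.
 */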
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct red_sched_data *q = qdisc_priv(sch);

        q->qdisc = &noop_qdisc;
        return red_change(sch, opt);
}

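/*
 * Dump the current configuration back to userspace. qth_min and
 * qth_max are stored left-shifted by Wlog internally, so they are
 * shifted back before being reported.
 */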
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts = NULL;
        struct tc_red_qopt opt = {
                .limit          = q->limit,
                .flags          = q->flags,
                .qth_min        = q->parms.qth_min >> q->parms.Wlog,
                .qth_max        = q->parms.qth_max >> q->parms.Wlog,
                .Wlog           = q->parms.Wlog,
                .Plog           = q->parms.Plog,
                .Scell_log      = q->parms.Scell_log,
        };

        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

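/* Export the RED-specific counters (early/tail/other drops, ECN marks). */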
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct tc_red_xstats st = {
                .early  = q->stats.prob_drop + q->stats.forced_drop,
                .pdrop  = q->stats.pdrop,
                .other  = q->stats.other,
                .marked = q->stats.prob_mark + q->stats.forced_mark,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

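/*
 * Class operations: RED is classful only in the minimal sense of
 * exposing its single child qdisc as pseudo-class 1, so most of these
 * handlers are trivial.
 */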
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct red_sched_data *q = qdisc_priv(sch);

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;
        return 0;
}

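/* Replace the child qdisc; the old child is reset and handed back to
 * the caller for destruction.
 */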
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct red_sched_data *q = qdisc_priv(sch);

        if (new == NULL)
                new = &noop_qdisc;

        sch_tree_lock(sch);
        *old = q->qdisc;
        q->qdisc = new;
        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
        qdisc_reset(*old);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct red_sched_data *q = qdisc_priv(sch);

        return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

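/* Walk the single pseudo-class, honouring the walker's skip/count
 * bookkeeping.
 */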
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static const struct Qdisc_class_ops red_class_ops = {
        .graft          = red_graft,
        .leaf           = red_leaf,
        .get            = red_get,
        .put            = red_put,
        .walk           = red_walk,
        .dump           = red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
        .id             = "red",
        .priv_size      = sizeof(struct red_sched_data),
        .cl_ops         = &red_class_ops,
        .enqueue        = red_enqueue,
        .dequeue        = red_dequeue,
        .peek           = red_peek,
        .drop           = red_drop,
        .init           = red_init,
        .reset          = red_reset,
        .destroy        = red_destroy,
        .change         = red_change,
        .dump           = red_dump,
        .dump_stats     = red_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init red_module_init(void)
{
        return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
        unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");