/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

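/* q->flags carries the TC_RED_* bits handed down from userspace in
 * red_change(); these helpers test the individual mode bits. */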
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

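/*
 * Enqueue: update the EWMA queue average from the child qdisc's backlog,
 * let the RED algorithm decide whether to pass, probabilistically mark,
 * or hard-mark the packet, then either set the ECN CE codepoint (or drop,
 * if ECN is unusable) before handing the packet to the child qdisc.
 */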
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

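/*
 * Requeue: a previously dequeued packet could not be sent and goes back
 * to the child qdisc. The queue is busy again, so end any idle period.
 */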
static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

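/*
 * Dequeue from the child qdisc. When the child runs empty, start an idle
 * period so the average queue length decays while the link is idle.
 */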
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

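/* Drop one packet from the child qdisc, accounting it as an "other" drop. */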
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}

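/*
 * Build the default child: a bfifo qdisc limited to the RED "limit",
 * configured through the same rtattr interface userspace would use.
 */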
static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}

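/*
 * Parse TCA_RED_PARMS and TCA_RED_STAB, (re)create the child fifo when a
 * limit is given, and install the new RED parameters under the tree lock.
 */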
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch->dev, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

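/* Start with the noop qdisc as child; red_change() installs the real one. */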
static int red_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

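/* Dump the current configuration back to userspace as TCA_RED_PARMS,
 * scaling the thresholds back from their Wlog-shifted internal form. */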
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

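/* Export the RED-specific drop and mark counters to userspace. */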
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

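/*
 * RED exposes a single pseudo-class (minor 1) wrapping the child qdisc;
 * most of the class operations below are therefore trivial or unsupported.
 */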
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
	return;
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.get		= red_get,
	.put		= red_put,
	.change		= red_change_class,
	.delete		= red_delete,
	.walk		= red_walk,
	.tcf_chain	= red_find_tcf,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");