[PKT_SCHED]: RED: Cleanup and remove unnecessary code
[deliverable/linux.git] / net / sched / sch_red.c
1 /*
2 * net/sched/sch_red.c Random Early Detection queue.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Changes:
12 * J Hadi Salim 980914: computation fixes
13 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
14 * J Hadi Salim 980816: ECN support
15 */
16
17 #include <linux/config.h>
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/netdevice.h>
22 #include <linux/skbuff.h>
23 #include <net/pkt_sched.h>
24 #include <net/inet_ecn.h>
25 #include <net/red.h>
26
27
28 /* Parameters, settable by user:
29 -----------------------------
30
31 limit - bytes (must be > qth_max + burst)
32
33 Hard limit on queue length, should be chosen >qth_max
34 to allow packet bursts. This parameter does not
35 affect the algorithm's behaviour and can be chosen
36 arbitrarily high (well, less than ram size)
37 Really, this limit will never be reached
38 if RED works correctly.
39 */
40
/*
 * Per-qdisc RED state: the user-settable configuration plus the shared
 * RED estimator/parameter block and statistics from <net/red.h>.
 */
struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_* bits from userspace (TC_RED_ECN checked below) */
	struct red_parms	parms;		/* RED thresholds and qavg estimator state (net/red.h) */
	struct red_stats	stats;		/* mark/drop counters reported via red_dump_stats() */
};
48
/* Nonzero if the user enabled ECN marking (TC_RED_ECN flag) for this qdisc. */
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
53
54 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
55 {
56 struct red_sched_data *q = qdisc_priv(sch);
57
58 q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);
59
60 if (red_is_idling(&q->parms))
61 red_end_of_idle_period(&q->parms);
62
63 switch (red_action(&q->parms, q->parms.qavg)) {
64 case RED_DONT_MARK:
65 break;
66
67 case RED_PROB_MARK:
68 sch->qstats.overlimits++;
69 if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
70 q->stats.prob_drop++;
71 goto congestion_drop;
72 }
73
74 q->stats.prob_mark++;
75 break;
76
77 case RED_HARD_MARK:
78 sch->qstats.overlimits++;
79 if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
80 q->stats.forced_drop++;
81 goto congestion_drop;
82 }
83
84 q->stats.forced_mark++;
85 break;
86 }
87
88 if (sch->qstats.backlog + skb->len <= q->limit)
89 return qdisc_enqueue_tail(skb, sch);
90
91 q->stats.pdrop++;
92 return qdisc_drop(skb, sch);
93
94 congestion_drop:
95 qdisc_drop(skb, sch);
96 return NET_XMIT_CN;
97 }
98
/*
 * Put a packet back at the head of the queue (requeue path — presumably
 * after a dequeued packet could not be transmitted; verify against the
 * generic qdisc layer). The queue is about to be non-empty again, so
 * close any tracked idle period before delegating to qdisc_requeue().
 */
static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	return qdisc_requeue(skb, sch);
}
108
109 static struct sk_buff * red_dequeue(struct Qdisc* sch)
110 {
111 struct sk_buff *skb;
112 struct red_sched_data *q = qdisc_priv(sch);
113
114 skb = qdisc_dequeue_head(sch);
115
116 if (skb == NULL && !red_is_idling(&q->parms))
117 red_start_of_idle_period(&q->parms);
118
119 return skb;
120 }
121
122 static unsigned int red_drop(struct Qdisc* sch)
123 {
124 struct sk_buff *skb;
125 struct red_sched_data *q = qdisc_priv(sch);
126
127 skb = qdisc_dequeue_tail(sch);
128 if (skb) {
129 unsigned int len = skb->len;
130 q->stats.other++;
131 qdisc_drop(skb, sch);
132 return len;
133 }
134
135 if (!red_is_idling(&q->parms))
136 red_start_of_idle_period(&q->parms);
137
138 return 0;
139 }
140
/*
 * Reset to initial state: flush all queued packets and restart the RED
 * estimator state via red_restart() (net/red.h).
 */
static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	red_restart(&q->parms);
}
148
149 static int red_change(struct Qdisc *sch, struct rtattr *opt)
150 {
151 struct red_sched_data *q = qdisc_priv(sch);
152 struct rtattr *tb[TCA_RED_MAX];
153 struct tc_red_qopt *ctl;
154
155 if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
156 return -EINVAL;
157
158 if (tb[TCA_RED_PARMS-1] == NULL ||
159 RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
160 tb[TCA_RED_STAB-1] == NULL ||
161 RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
162 return -EINVAL;
163
164 ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
165
166 sch_tree_lock(sch);
167 q->flags = ctl->flags;
168 q->limit = ctl->limit;
169
170 red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
171 ctl->Plog, ctl->Scell_log,
172 RTA_DATA(tb[TCA_RED_STAB-1]));
173
174 if (skb_queue_empty(&sch->q))
175 red_end_of_idle_period(&q->parms);
176
177 sch_tree_unlock(sch);
178 return 0;
179 }
180
/*
 * Initial configuration shares the parse/validate path with changes;
 * a NULL opt is rejected with -EINVAL inside red_change().
 */
static int red_init(struct Qdisc* sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}
185
186 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
187 {
188 struct red_sched_data *q = qdisc_priv(sch);
189 struct rtattr *opts = NULL;
190 struct tc_red_qopt opt = {
191 .limit = q->limit,
192 .flags = q->flags,
193 .qth_min = q->parms.qth_min >> q->parms.Wlog,
194 .qth_max = q->parms.qth_max >> q->parms.Wlog,
195 .Wlog = q->parms.Wlog,
196 .Plog = q->parms.Plog,
197 .Scell_log = q->parms.Scell_log,
198 };
199
200 opts = RTA_NEST(skb, TCA_OPTIONS);
201 RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
202 return RTA_NEST_END(skb, opts);
203
204 rtattr_failure:
205 return RTA_NEST_CANCEL(skb, opts);
206 }
207
208 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
209 {
210 struct red_sched_data *q = qdisc_priv(sch);
211 struct tc_red_xstats st = {
212 .early = q->stats.prob_drop + q->stats.forced_drop,
213 .pdrop = q->stats.pdrop,
214 .other = q->stats.other,
215 .marked = q->stats.prob_mark + q->stats.forced_mark,
216 };
217
218 return gnet_stats_copy_app(d, &st, sizeof(st));
219 }
220
/* Operations vector registered with the qdisc core under id "red". */
static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};
235
/* Module entry: register the RED qdisc with the scheduler core. */
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}
240
/* Module exit: unregister the RED qdisc. */
static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
245
/* Module registration hooks and license declaration. */
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");
This page took 0.052242 seconds and 6 git commands to generate.