/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length; it should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
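
/*
 * A rough sizing example (illustrative numbers, not defaults): with
 * qth_min = 30000 and qth_max = 90000 bytes, a limit of 120000 leaves
 * qth_max plus roughly one burst of headroom, so RED reacts before the
 * hard tail-drop limit is ever reached. In iproute2 terms this would
 * correspond to something like (parameter names as in tc-red(8)):
 *
 *	tc qdisc add dev eth0 root red limit 120000 min 30000 max 90000 \
 *		avpkt 1000 burst 50 probability 0.02 bandwidth 10mbit
 */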

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
};

/* TC_RED_ECN set: prefer marking ECN-capable packets over dropping them. */
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* TC_RED_HARDDROP set: drop above qth_max even when ECN marking is enabled. */
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	/* Refresh the average queue length estimate for this arrival. */
	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		/* qth_min <= qavg < qth_max: mark/drop with rising probability. */
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		/* qavg >= qth_max: mark/drop unconditionally. */
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit)
		return qdisc_enqueue_tail(skb, sch);

	/* Hard limit exceeded: plain tail drop. */
	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

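/*
 * A sketch of the averaging done by red_calc_qavg() above, following
 * the fixed point scheme documented in net/red.h (qavg carries its
 * binary point at Wlog bits): the textbook EWMA
 *
 *	qavg = (1 - W) * qavg + W * backlog,	W = 2^(-Wlog)
 *
 * is computed, with qavg stored pre-scaled by 2^Wlog, as
 *
 *	qavg += backlog - (qavg >> Wlog);
 *
 * one shift, one subtract and one add per packet, no divisions.
 */
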
static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	return qdisc_requeue(skb, sch);
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb == NULL && !red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

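/*
 * Note on the idle bookkeeping above (a sketch of the net/red.h
 * scheme): once the queue drains, no enqueues arrive to pull the
 * average down, so red_start_of_idle_period() timestamps the moment
 * the queue went empty. On the next enqueue, red_calc_qavg()
 * approximates the decay qavg *= (1-W)^m for the m "virtual" packets
 * the idle time represents by looking up a shift count in the
 * user-supplied STAB table, indexed by (idle_time >> Scell_log), and
 * applying it as qavg >>= shift.
 */
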
static unsigned int red_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		q->stats.other++;
		qdisc_drop(skb, sch);
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	red_restart(&q->parms);
}

static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

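/*
 * Shape of the request parsed by red_change() (a sketch; the values
 * below are illustrative, struct tc_red_qopt comes from
 * <linux/pkt_sched.h>): userspace nests two attributes inside
 * TCA_OPTIONS -- TCA_RED_PARMS carrying a tc_red_qopt, and
 * TCA_RED_STAB carrying the RED_STAB_SIZE (256 byte) idle-decay table.
 *
 *	struct tc_red_qopt qopt = {
 *		.limit     = 120000,		// hard tail-drop limit, bytes
 *		.qth_min   = 30000,		// start probabilistic marking
 *		.qth_max   = 90000,		// force mark/drop above this
 *		.Wlog      = 9,			// EWMA weight W = 2^-9
 *		.Plog      = 21,		// slope of the marking probability
 *		.Scell_log = 10,		// idle-decay cell size
 *		.flags     = TC_RED_ECN,	// mark rather than drop when possible
 *	};
 */
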
static int red_init(struct Qdisc *sch, struct rtattr *opt)
{
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

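/*
 * These counters surface in userspace via tc (a rough illustration of
 * the xstats line printed by iproute2's q_red.c, not verbatim output):
 *
 *	$ tc -s qdisc show dev eth0
 *	...
 *	 marked 0 early 0 pdrop 0 other 0
 *
 * "early" folds together probabilistic and forced RED drops, "marked"
 * the corresponding ECN marks, "pdrop" the hard-limit tail drops.
 */
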
static struct Qdisc_ops red_qdisc_ops = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.requeue	= red_requeue,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");