/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_keys.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */

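/*
 * Rough sketch of the per-packet decision implemented by choke_enqueue()
 * below (qavg is the RED-style EWMA of the queue length):
 *
 *	if (qavg <= qth_min)
 *		enqueue
 *	else if (a randomly chosen queued packet is from the same flow)
 *		drop both the queued packet and the new one
 *	else
 *		apply the normal RED mark/drop decision, then enqueue
 */
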
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* parameters */
        u32              limit;
        unsigned char    flags;

        struct red_parms parms;

/* variables */
        struct red_vars  vars;
        struct tcf_proto __rcu *filter_list;
        struct {
                u32     prob_drop;      /* Early probability drops */
                u32     prob_mark;      /* Early probability marks */
                u32     forced_drop;    /* Forced drops, qavg > max_thresh */
                u32     forced_mark;    /* Forced marks, qavg > max_thresh */
                u32     pdrop;          /* Drops due to queue limits */
                u32     other;          /* Drops due to drop() calls */
                u32     matched;        /* Drops to flow match */
        } stats;

        unsigned int     head;
        unsigned int     tail;

        unsigned int     tab_mask; /* size - 1 */

        struct sk_buff **tab;
};

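/*
 * The packet table is a power-of-two sized ring buffer indexed by head and
 * tail.  Random drops leave NULL "holes" behind; the zap helpers below move
 * head and tail past such holes, so the slot at q->head is always a real
 * packet while the queue is non-empty.
 */
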
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
        return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
        do {
                q->head = (q->head + 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
        do {
                q->tail = (q->tail - 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = q->tab[idx];

        q->tab[idx] = NULL;

        if (idx == q->head)
                choke_zap_head_holes(q);
        if (idx == q->tail)
                choke_zap_tail_holes(q);

        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_drop(skb, sch);
        qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
}

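/*
 * Note that dropping an interior packet only NULLs its slot; the hole is
 * skipped over later by choke_zap_head_holes() as the head advances, which
 * keeps the random drop itself O(1).
 */
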
struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
        struct flow_keys        keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
        return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
        choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
        return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
{
        if (skb1->protocol != skb2->protocol)
                return false;

        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
                skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
        }

        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
                skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
        }

        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
                       sizeof(struct flow_keys));
}

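/*
 * The dissected flow_keys contain the addresses, transport ports and IP
 * protocol, so the memcmp() above effectively compares the two packets'
 * flow tuples; the skb->protocol check keeps packets of different L3
 * protocols from ever matching.
 */
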
/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
                           struct Qdisc *sch, int *qerr)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        fl = rcu_dereference_bh(q->filter_list);
        result = tc_classify(skb, fl, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return false;
                }
#endif
                choke_set_classid(skb, TC_H_MIN(res.classid));
                return true;
        }

        return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
                                         unsigned int *pidx)
{
        struct sk_buff *skb;
        int retrys = 3;

        do {
                *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
                skb = q->tab[*pidx];
                if (skb)
                        return skb;
        } while (--retrys > 0);

        return q->tab[*pidx = q->head];
}

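/*
 * Falling back to q->head after a few failed picks is safe: the head slot
 * is never a hole while the queue is non-empty, so callers always get a
 * valid packet here.
 */
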
/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
                               struct sk_buff *nskb,
                               unsigned int *pidx)
{
        struct sk_buff *oskb;

        if (q->head == q->tail)
                return false;

        oskb = choke_peek_random(q, pidx);
        if (rcu_access_pointer(q->filter_list))
                return choke_get_classid(nskb) == choke_get_classid(oskb);

        return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        struct choke_sched_data *q = qdisc_priv(sch);
        const struct red_parms *p = &q->parms;

        if (rcu_access_pointer(q->filter_list)) {
                /* If using external classifiers, get result and record it. */
                if (!choke_classify(skb, sch, &ret))
                        goto other_drop;        /* Packet was eaten by filter */
        }

        choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
        if (red_is_idling(&q->vars))
                red_end_of_idle_period(&q->vars);

        /* Is queue small? */
        if (q->vars.qavg <= p->qth_min)
                q->vars.qcount = -1;
        else {
                unsigned int idx;

                /* Draw a packet at random from queue and compare flow */
                if (choke_match_random(q, skb, &idx)) {
                        q->stats.matched++;
                        choke_drop_by_idx(sch, idx);
                        goto congestion_drop;
                }

                /* Queue is large, always mark/drop */
                if (q->vars.qavg > p->qth_max) {
                        q->vars.qcount = -1;

                        sch->qstats.overlimits++;
                        if (use_harddrop(q) || !use_ecn(q) ||
                            !INET_ECN_set_ce(skb)) {
                                q->stats.forced_drop++;
                                goto congestion_drop;
                        }

                        q->stats.forced_mark++;
                } else if (++q->vars.qcount) {
                        if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
                                q->vars.qcount = 0;
                                q->vars.qR = red_random(p);

                                sch->qstats.overlimits++;
                                if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                        q->stats.prob_drop++;
                                        goto congestion_drop;
                                }

                                q->stats.prob_mark++;
                        }
                } else
                        q->vars.qR = red_random(p);
        }

        /* Admit new packet */
        if (sch->q.qlen < q->limit) {
                q->tab[q->tail] = skb;
                q->tail = (q->tail + 1) & q->tab_mask;
                ++sch->q.qlen;
                sch->qstats.backlog += qdisc_pkt_len(skb);
                return NET_XMIT_SUCCESS;
        }

        q->stats.pdrop++;
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;

other_drop:
        if (ret & __NET_XMIT_BYPASS)
                sch->qstats.drops++;
        kfree_skb(skb);
        return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (q->head == q->tail) {
                if (!red_is_idling(&q->vars))
                        red_start_of_idle_period(&q->vars);
                return NULL;
        }

        skb = q->tab[q->head];
        q->tab[q->head] = NULL;
        choke_zap_head_holes(q);
        --sch->q.qlen;
        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_bstats_update(sch, skb);

        return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        unsigned int len;

        len = qdisc_queue_drop(sch);
        if (len > 0)
                q->stats.other++;
        else {
                if (!red_is_idling(&q->vars))
                        red_start_of_idle_period(&q->vars);
        }

        return len;
}

static void choke_reset(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
        [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
        kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
        const struct tc_red_qopt *ctl;
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CHOKE_PARMS] == NULL ||
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;

        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

        ctl = nla_data(tb[TCA_CHOKE_PARMS]);

        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;

        mask = roundup_pow_of_two(ctl->limit + 1) - 1;
        if (mask != q->tab_mask) {
                struct sk_buff **ntab;

                ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
                               GFP_KERNEL | __GFP_NOWARN);
                if (!ntab)
                        ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
                if (!ntab)
                        return -ENOMEM;

                sch_tree_lock(sch);
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;

                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];

                                q->head = (q->head + 1) & q->tab_mask;
                                if (!skb)
                                        continue;
                                if (tail < mask) {
                                        ntab[tail++] = skb;
                                        continue;
                                }
                                sch->qstats.backlog -= qdisc_pkt_len(skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
                        qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
                        q->head = 0;
                        q->tail = tail;
                }

                q->tab_mask = mask;
                q->tab = ntab;
        } else
                sch_tree_lock(sch);

        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      nla_data(tb[TCA_CHOKE_STAB]),
                      max_P);
        red_set_vars(&q->vars);

        if (q->head == q->tail)
                red_end_of_idle_period(&q->vars);

        sch_tree_unlock(sch);
        choke_free(old);
        return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
        return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts = NULL;
        struct tc_red_qopt opt = {
                .limit          = q->limit,
                .flags          = q->flags,
                .qth_min        = q->parms.qth_min >> q->parms.Wlog,
                .qth_max        = q->parms.qth_max >> q->parms.Wlog,
                .Wlog           = q->parms.Wlog,
                .Plog           = q->parms.Plog,
                .Scell_log      = q->parms.Scell_log,
        };

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
            nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tc_choke_xstats st = {
                .early  = q->stats.prob_drop + q->stats.forced_drop,
                .marked = q->stats.prob_mark + q->stats.forced_mark,
                .pdrop  = q->stats.pdrop,
                .other  = q->stats.other,
                .matched = q->stats.matched,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        choke_free(q->tab);
}

static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
                                u32 classid)
{
        return 0;
}

static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch,
                                               unsigned long cl)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
                            struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        if (!arg->stop) {
                if (arg->fn(sch, 1, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops choke_class_ops = {
        .leaf           =       choke_leaf,
        .get            =       choke_get,
        .put            =       choke_put,
        .tcf_chain      =       choke_find_tcf,
        .bind_tcf       =       choke_bind,
        .unbind_tcf     =       choke_put,
        .dump           =       choke_dump_class,
        .walk           =       choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
        .id             =       "choke",
        .priv_size      =       sizeof(struct choke_sched_data),

        .enqueue        =       choke_enqueue,
        .dequeue        =       choke_dequeue,
        .peek           =       choke_peek_head,
        .drop           =       choke_drop,
        .init           =       choke_init,
        .destroy        =       choke_destroy,
        .reset          =       choke_reset,
        .change         =       choke_change,
        .dump           =       choke_dump,
        .dump_stats     =       choke_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init choke_module_init(void)
{
        return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
        unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
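
/*
 * The qdisc is registered under the name "choke" (see choke_qdisc_ops.id)
 * and is configured through the TCA_CHOKE_* netlink attributes handled in
 * choke_change(), e.g. via iproute2's "tc qdisc ... choke ..." command.
 */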