/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic:
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32-bit hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (e.g. TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect rate limitation.
 *
 * enqueue():
 *  - lookup one RB tree (out of 1024 or more) to find the flow.
 *    If the flow does not exist, create it and add it to the tree.
 *    Add the skb to the per-flow list of skbs (a fifo).
 *  - Use a special fifo for high priority packets.
 *
 * dequeue(): serves flows in Round Robin order.
 * Note: When a flow becomes empty, we do not immediately remove it from
 * the RB trees, for performance reasons (it's expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */
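
/* Example setup, a sketch using the iproute2 'tc' tool (option names per
 * tc-fq; values are illustrative, not recommendations):
 *
 *      tc qdisc replace dev eth0 root fq
 *      tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *              maxrate 1gbit
 *      tc -s qdisc show dev eth0
 */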

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff *head;           /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

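/* Insert @f into the q->delayed rbtree, ordered by time_next_packet.
 * Equal keys go to the right, so flows with the same deadline are served
 * in insertion order. Also lowers q->time_next_delayed_flow if this flow
 * is due sooner than any other throttled flow.
 */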
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = container_of(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}

static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

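/* Lazy garbage collection: while walking the tree toward @sk, reap up to
 * FQ_GC_MAX flows that have been detached for more than FQ_GC_AGE.
 * Called from fq_classify() when the table looks overloaded.
 */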
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

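/* Find (or create) the flow for @skb.
 * Flows are keyed by skb->sk; for packets without a socket (e.g. forwarded
 * traffic) we key on the skb hash with the low order bit forced to 1, so it
 * cannot collide with a word-aligned socket pointer.
 */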
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        if (unlikely(!sk)) {
                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)(skb_get_hash(skb) | 1L);
        }

        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) { /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}

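/* Qdisc enqueue entry point: tail-drop if the qdisc is over sch->limit, or
 * if the target flow (other than the internal one) is over q->flow_plimit;
 * otherwise append the skb to its flow, re-attaching the flow to the
 * new_flows list if it was detached.
 */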
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                rb_erase(p, &q->delayed);
                q->throttled_flows--;
                fq_flow_add_tail(&q->old_flows, f);
        }
}

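/* Qdisc dequeue entry point: serve the high priority internal queue first,
 * then flows in deficit round-robin order (new flows before old ones),
 * throttling any flow whose pacing deadline lies in the future.
 */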
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow,
                                                           false);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        if (unlikely(f->head && now < f->time_next_packet)) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);

        if (f->credit > 0 || !q->rate_enable)
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
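
                /* Worked example (illustrative numbers): with the default
                 * quantum, plen is at least 3028 bytes; at rate = 125000000
                 * bytes/sec (1 Gbit/s) this gives
                 * len = 3028 * 10^9 / 125000000 = 24224 ns, i.e. roughly
                 * 24 usec until the next packet of this flow may be sent.
                 */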
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }

                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct sk_buff *skb;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = container_of(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        while ((skb = fq_dequeue_head(sch, f)) != NULL)
                                kfree_skb(skb);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->flows = 0;
        q->inactive_flows = 0;
        q->throttled_flows = 0;
}

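/* Called from fq_resize(): move every flow from the old hash array to the
 * new one; idle flows that already qualify for garbage collection are freed
 * instead of being re-inserted.
 */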
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = container_of(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = container_of(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void *fq_alloc_node(size_t sz, int node)
{
        void *ptr;

        ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
        if (!ptr)
                ptr = vmalloc_node(sz, node);
        return ptr;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was set up, we can allocate memory on the right NUMA node */
        array = fq_alloc_node(sizeof(struct rb_root) << log,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
};

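/* Netlink 'change' handler for the TCA_FQ_* attributes above. A sketch of a
 * matching iproute2 invocation (option names per tc-fq, values illustrative):
 *
 *      tc qdisc change dev eth0 root fq quantum 3028 refill_delay 40ms
 */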
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM])
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                kfree_skb(skb);
                drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, drop_count);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit = 10000;
        q->flow_plimit = 100;
        q->quantum = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay = msecs_to_jiffies(40);
        q->flow_max_rate = ~0U;
        q->rate_enable = 1;
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->fq_root = NULL;
        q->fq_trees_log = ilog2(1024);
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

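/* Export the internal counters; these back the per-qdisc statistics that
 * 'tc -s qdisc show' prints (gc, highprio, throttled, flows_plimit, ...).
 */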
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct tc_fq_qd_stats st = {
                .gc_flows               = q->stat_gc_flows,
                .highprio_packets       = q->stat_internal_packets,
                .tcp_retrans            = q->stat_tcp_retrans,
                .throttled              = q->stat_throttled,
                .flows_plimit           = q->stat_flows_plimit,
                .pkts_too_long          = q->stat_pkts_too_long,
                .allocation_errors      = q->stat_allocation_errors,
                .flows                  = q->flows,
                .inactive_flows         = q->inactive_flows,
                .throttled_flows        = q->throttled_flows,
                .time_next_delayed_flow = q->time_next_delayed_flow - now,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             = "fq",
        .priv_size      = sizeof(struct fq_sched_data),

        .enqueue        = fq_enqueue,
        .dequeue        = fq_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = fq_init,
        .reset          = fq_reset,
        .destroy        = fq_destroy,
        .change         = fq_change,
        .dump           = fq_dump,
        .dump_stats     = fq_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");