/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

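/* Park an skb that could not be sent on q->gso_skb and reschedule the
 * qdisc.  The skb is counted in q->q.qlen again, so a later dequeue_skb()
 * retries it before pulling anything new off the queue.
 */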
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	skb_dst_force(skb);
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

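/* Fetch the next skb to transmit.  A previously requeued skb (q->gso_skb)
 * is retried first, but only once the tx queue it maps to is no longer
 * frozen or stopped; while that queue stays stopped, nothing is dequeued.
 */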
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason for requeuing without taking the tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else {
			skb = NULL;
		}
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
				     dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

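/* Drain the qdisc: keep calling qdisc_restart() until the queue empties,
 * the packet quota (weight_p) is used up, or another task needs the CPU,
 * in which case the qdisc is rescheduled via __netif_schedule().
 */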
void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;

	while (qdisc_restart(q)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		if (--quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

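/* Return the latest trans_start across the device's tx queues and cache
 * the result in dev->trans_start.
 */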
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);

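/* Per-device watchdog timer: if any stopped tx queue has not transmitted
 * within dev->watchdog_timeo, report it via ndo_tx_timeout() and re-arm
 * the timer for as long as the device stays up.
 */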
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 * netif_carrier_on - set carrier
 * @dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 * netif_carrier_off - clear carrier
 * @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 * under all circumstances. It is difficult to invent anything faster or
 * cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		= "noop",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.peek		= noop_dequeue,
	.owner		= THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		= &noop_qdisc,
	.qdisc_sleeping	= &noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noop_qdisc_ops,
	.list		= LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		= __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	= &noop_netdev_queue,
	.busylock	= __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		= "noqueue",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.peek		= noop_dequeue,
	.owner		= THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		= &noqueue_qdisc,
	.qdisc_sleeping	= &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	= NULL,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noqueue_qdisc_ops,
	.list		= LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		= __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	= &noqueue_netdev_queue,
	.busylock	= __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
 * generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		= "pfifo_fast",
	.priv_size	= sizeof(struct pfifo_fast_priv),
	.enqueue	= pfifo_fast_enqueue,
	.dequeue	= pfifo_fast_dequeue,
	.peek		= pfifo_fast_peek,
	.init		= pfifo_fast_init,
	.reset		= pfifo_fast_reset,
	.dump		= pfifo_fast_dump,
	.owner		= THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

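/* Allocate a Qdisc plus ops->priv_size bytes of private data, aligned to
 * QDISC_ALIGNTO and allocated on the NUMA node of the tx queue it will
 * serve.  Returns ERR_PTR(-ENOBUFS) on failure.
 */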
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	spin_lock_init(&sch->busylock);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

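/* Convenience wrapper around qdisc_alloc(): set the parent handle and run
 * ops->init() with default (NULL) attributes; returns NULL on failure.
 */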
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

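/* Drop a reference to a qdisc.  The last reference tears it down: the
 * estimator is killed, ops->reset() and ops->destroy() are called, and
 * the memory is freed after an RCU grace period.  Built-in qdiscs
 * (TCQ_F_BUILTIN) are never freed.
 */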
void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * so wait an RCU grace period before freeing the qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

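/* Default qdisc setup used by dev_activate(): devices with a tx_queue_len
 * get pfifo_fast on each tx queue (multiqueue devices get the mq scheduler
 * instead), while virtual devices with tx_queue_len == 0 get noqueue.
 */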
static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc = &noqueue_qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			netdev_info(dev, "activation failed\n");
			return;
		}
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device, create a
	 * default one: pfifo_fast for devices that need queueing and
	 * noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

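/* Deactivation path: swap each queue's qdisc for qdisc_default (normally
 * the noop qdisc), mark the old one __QDISC_STATE_DEACTIVATED and reset
 * it under its lock.
 */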
static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

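/* A qdisc is "busy" while it is running or still scheduled for a softirq
 * run; dev_deactivate_many() keeps yielding until every queue is idle.
 */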
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, unreg_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, unreg_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

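/* At registration time dev_init_scheduler() points dev->qdisc and every
 * tx queue at the noop qdisc and sets up the watchdog timer; real qdiscs
 * are attached later by dev_activate().
 */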
static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}