/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_root_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via the qdisc root lock
 * - ingress filtering is also serialized via the qdisc root lock
 * - updates to the tree and tree walking are only done under the rtnl mutex.
 */

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

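/* Put the skb back onto the qdisc for a later retry.  A partially sent GSO
 * segment list (skb->next != NULL) is stashed in q->gso_skb so that
 * dequeue_skb() picks it up first; anything else goes back through the
 * qdisc's own ->requeue() operation.  The qdisc is then rescheduled.
 */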
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (unlikely(skb->next))
		q->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	__netif_schedule(q);
	return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = q->gso_skb))
		q->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

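/* Called when the driver's xmit lock could not be taken (NETDEV_TX_LOCKED).
 * If the lock owner is this very CPU, hard_start_xmit() has recursed: the
 * skb is dropped and the current queue length is returned so the caller
 * keeps going.  Otherwise the skb is requeued and 0 is returned to back off.
 */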
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding the lock, requeue & delay xmits for
		 * some time.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (unlikely((skb = dequeue_skb(q)) == NULL))
		return 0;

	root_lock = qdisc_root_lock(q);

	/* And release qdisc */
	spin_unlock(root_lock);

	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_subqueue_stopped(dev, skb))
		ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
		break;
	}

	if (ret && netif_tx_queue_stopped(txq))
		ret = 0;

	return ret;
}

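/* Run the qdisc until it is empty or it is time to give up the CPU.
 * Callers (normally the qdisc_run() wrapper) are expected to have set
 * __QDISC_STATE_RUNNING before calling this; the bit is cleared here when
 * the loop terminates.  If we need to reschedule, or jiffies has advanced
 * since we started, the remaining work is deferred via __netif_schedule().
 */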
void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}

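/* Transmit watchdog timer.  While the device is present, running and has
 * carrier, it checks whether some TX queue is stopped and the last transmit
 * (dev->trans_start) is older than dev->watchdog_timeo; if so, the driver's
 * tx_timeout() handler is invoked.  The timer re-arms itself as long as the
 * device stays in that state.
 */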
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_stopped = 0;
			unsigned int i;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (netif_tx_queue_stopped(txq)) {
					some_queue_stopped = 1;
					break;
				}
			}

			if (some_queue_stopped &&
			    time_after(jiffies, (dev->trans_start +
						 dev->watchdog_timeo))) {
				char drivername[64];
				printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
				       dev->name, netdev_drivername(dev, drivername, 64));
				dev->tx_timeout(dev);
				WARN_ON_ONCE(1);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

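/* noqueue_qdisc and noqueue_netdev_queue reference each other, so the qdisc
 * needs a tentative (forward) definition before the queue can point at it.
 */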
static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
};


static const u8 prio2band[TC_PRIO_MAX + 1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
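/* Worked example of the mapping above: skb->priority is masked with
 * TC_PRIO_MAX (15) and used as an index into this table, so priorities 6
 * and 7 (e.g. TC_PRIO_INTERACTIVE and TC_PRIO_CONTROL) land in band 0,
 * which is dequeued first; priorities 0, 4 and 8-15 land in band 1; and
 * priorities 1-3 and 5 land in band 2, which is served last.
 */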

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

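/* Allocate the Qdisc and its private area in one block.  Enough slack is
 * reserved so that the returned Qdisc pointer can be rounded up to a
 * QDISC_ALIGNTO boundary; the offset back to the start of the allocation is
 * remembered in sch->padded so the block can be freed correctly later.
 */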
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

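/* Convenience wrapper: allocate a qdisc, set its parent handle and run the
 * ops->init() hook, returning NULL on any failure.  A typical caller is
 * attach_one_default_qdisc() below, which does roughly:
 *
 *	qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops, TC_H_ROOT);
 *
 * and falls back cleanly when NULL is returned.
 */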
struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_root_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}
EXPORT_SYMBOL(qdisc_reset);

/* This is the RCU callback function to clean up a qdisc when there
 * are no further references to it.
 */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	const struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_SCHED
	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);

	kfree((char *) qdisc - qdisc->padded);
}

/* Under qdisc_root_lock(qdisc) and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (qdisc->parent)
		list_del(&qdisc->list);

	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);

static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc_sleeping != &noop_qdisc)
			return false;
	}
	return true;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev, dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (new_qdisc != &noqueue_qdisc)
		*need_watchdog_p = 1;
}

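/* Bring the device's TX qdiscs into service: attach default qdiscs if every
 * queue is still using noop_qdisc, then (once carrier is up) make each
 * queue's qdisc_sleeping the active qdisc, and start the transmit watchdog
 * if any queue received something other than noqueue_qdisc.
 */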
void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device, create a
	   default one, i.e. pfifo_fast for devices which need queueing
	   and noqueue_qdisc for virtual interfaces.
	 */

	if (dev_all_qdisc_sleeping_noop(dev))
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		dev_queue->qdisc = qdisc_default;
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

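/* Check whether any TX queue's qdisc still has __QDISC_STATE_RUNNING set.
 * When 'lock' is non-zero the test is done under the qdisc root lock, which
 * makes the result a reliable synchronization point; the lockless variant
 * is only used for cheap polling in dev_deactivate() below.
 */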
static bool some_qdisc_is_running(struct net_device *dev, int lock)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc;
		root_lock = qdisc_root_lock(q);

		if (lock)
			spin_lock_bh(root_lock);

		val = test_bit(__QDISC_STATE_RUNNING, &q->state);

		if (lock)
			spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	bool running;

	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (some_qdisc_is_running(dev, 0))
			yield();

		/*
		 * Double-check inside queue lock to ensure that all effects
		 * of the queue run are visible when we return.
		 */
		running = some_qdisc_is_running(dev, 1);

		/*
		 * The running flag should never be set at this point because
		 * we've already set dev->qdisc to noop_qdisc *inside* the same
		 * pair of spin locks. That is, if any qdisc_run starts after
		 * our initial test it should see the noop_qdisc and then
		 * clear the RUNNING bit before dropping the queue lock. So
		 * if it is set here then we've found a bug.
		 */
	} while (WARN_ON_ONCE(running));
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		spinlock_t *root_lock = qdisc_root_lock(qdisc);

		dev_queue->qdisc = qdisc_default;
		dev_queue->qdisc_sleeping = qdisc_default;

		spin_lock_bh(root_lock);
		qdisc_destroy(qdisc);
		spin_unlock_bh(root_lock);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
}