{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

-	struct net_device *dev = qdisc_dev(wd->qdisc);
+	struct netdev_queue *txq = wd->qdisc->dev_queue;

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
-	netif_schedule(dev);
+	netif_schedule_queue(txq);

	return HRTIMER_NORESTART;
}
	sch->parent = parent;
-	sch->stats_lock = &dev_queue->lock;
	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_RATE]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-						sch->stats_lock,
+						&sch->dev_queue->lock,
						tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here? The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      sch->stats_lock, tca[TCA_RATE]);
+				      &sch->dev_queue->lock, tca[TCA_RATE]);
	return 0;
}
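
The hunks above stop caching a stats_lock pointer in the qdisc; estimator setup now reaches the lock through the queue the qdisc is attached to. Roughly how the two structures relate after this change, with field lists trimmed to what the patch touches; member names beyond those visible in the diff are best-effort recollections of the era's headers, not quoted from this patch:

struct netdev_queue {
	spinlock_t		lock;		/* serializes qdisc state and stats */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	/* ... */
};

struct Qdisc {
	unsigned int		flags;		/* TCQ_F_THROTTLED, TCQ_F_INGRESS, ... */
	u32			handle;
	u32			parent;
	struct netdev_queue	*dev_queue;	/* &dev_queue->lock replaces stats_lock */
	struct gnet_stats_basic	bstats;
	struct gnet_stats_rate_est rate_est;
	/* ... stats_lock member removed by this series ... */
};
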
	q->qstats.qlen = q->q.qlen;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, q->stats_lock, &d) < 0)
+					 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, q->stats_lock, &d) < 0)
+					 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
		goto nla_put_failure;
	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;
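
Both dump paths now hand the queue lock straight to gnet_stats_start_copy_compat(), which holds that spinlock while the counters are copied so bstats/qstats stay consistent, and gnet_stats_finish_copy() releases it. A condensed sketch of the dump sequence these two hunks sit in; the copy_basic/copy_rate_est/copy_queue companions are assumed from the gen_stats API of the time and are not shown in this patch:

static int qdisc_dump_stats_sketch(struct sk_buff *skb, struct Qdisc *q)
{
	struct gnet_dump d;

	/* Takes &q->dev_queue->lock and opens the nested TCA_STATS2
	 * (with TCA_STATS/TCA_XSTATS compat) attributes.
	 */
	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
		return -1;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		return -1;

	/* Drops the lock and closes the nested attributes. */
	return gnet_stats_finish_copy(&d);
}
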