/* include/net/sch_generic.h */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct gnet_stats_rate_est64	rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	struct Qdisc		*next_sched ____cacheline_aligned_in_smp;
	struct sk_buff		*gso_skb;
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
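
/* A minimal usage sketch (not part of the original header): the
 * qdisc_run_begin()/qdisc_run_end() pair acts as a per-qdisc trylock built
 * on the seqcount, so only one CPU drains a given qdisc at a time. The
 * drain loop below is a hypothetical stand-in for the real one in
 * net/sched/sch_generic.c; a real caller would transmit the skbs.
 */
static inline void example_drain_qdisc(struct Qdisc *q)
{
	struct sk_buff *skb;

	if (!qdisc_run_begin(q))	/* somebody else is already running it */
		return;
	while ((skb = q->dequeue(q)) != NULL)
		kfree_skb(skb);		/* illustrative only; real code sends skb */
	qdisc_run_end(q);
}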

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops *cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	bool			(*destroy)(struct tcf_proto *, bool);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *, bool);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
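
/* Hedged sketch (hypothetical names): a qdisc that needs per-packet scratch
 * space can overlay its own struct on qdisc_skb_cb.data, calling
 * qdisc_cb_private_validate() to prove the overlay fits inside skb->cb.
 * This mirrors the pattern used by qdiscs such as netem.
 */
struct example_skb_cb {
	u64	enqueue_time_ns;	/* illustrative private field */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}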

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
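
/* A hedged usage sketch: configuration paths (already under RTNL) take the
 * sleeping root lock around tree mutations so the datapath cannot observe a
 * half-updated hierarchy. The operation shown (changing sch->limit) is
 * illustrative, not from the original header.
 */
static inline void example_set_limit(struct Qdisc *sch, u32 new_limit)
{
	sch_tree_lock(sch);	/* blocks out concurrent enqueue/dequeue */
	sch->limit = new_limit;
	sch_tree_unlock(sch);
}
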
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
		default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
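
/* Hedged sketch (hypothetical struct name): classful qdiscs embed
 * struct Qdisc_class_common at the head of their own per-class type, then
 * resolve classid lookups with qdisc_class_find() plus container_of(),
 * much like htb or hfsc do.
 */
struct example_class {
	struct Qdisc_class_common common;
	u32 quantum;	/* illustrative per-class parameter */
};

static inline struct example_class *
example_class_find(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *cc = qdisc_class_find(hash, classid);

	return cc ? container_of(cc, struct example_class, common) : NULL;
}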

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

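/* Informational note (an assumption spelled out, not original text):
 * __NET_XMIT_STOLEN means a child qdisc or action consumed the skb without
 * it counting as a drop toward the parent, so a parent enqueueing into a
 * child typically does something like:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */
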
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

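/* Hedged sketch: a non-work-conserving (shaping) qdisc typically implements
 * ->peek() with qdisc_peek_dequeued() and, once its timer or token bucket
 * says the packet may go, actually removes it with qdisc_dequeue_peeked().
 * The allowed_by_rate parameter below is a hypothetical stand-in for that
 * rate check.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     bool allowed_by_rate)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (!skb || !allowed_by_rate)	/* not yet time to send */
		return NULL;
	return qdisc_dequeue_peeked(sch);
}
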
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

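/* Hedged sketch: a classful qdisc's ->graft() hook commonly boils down to a
 * single qdisc_replace() call, swapping the child under the tree lock and
 * letting the helper fix up queue-length accounting. The child slot pointer
 * (example_child) is hypothetical.
 */
static inline int example_graft(struct Qdisc *sch, struct Qdisc *new,
				struct Qdisc **example_child,
				struct Qdisc **old)
{
	if (new == NULL)
		new = &noop_qdisc;	/* grafting "nothing" parks the slot */
	*old = qdisc_replace(sch, new, example_child);
	return 0;
}
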
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

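/* Hedged sketch tying the helpers above together: a minimal tail-drop FIFO
 * enqueue in the style of a qdisc's ->enqueue() hook. Everything here is
 * illustrative; real implementations live in net/sched/ (e.g. sch_fifo.c).
 */
static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < sch->limit))
		return qdisc_enqueue_tail(skb, sch);	/* updates backlog */

	return qdisc_drop(skb, sch);	/* counts the drop, NET_XMIT_DROP */
}
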
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

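/* Worked example (assumed numbers, not from the original source): with
 * cell_log = 3 and no align/overhead, a 1000-byte packet maps to slot
 * 1000 >> 3 = 125, so its transmit time is rtab->data[125] ticks. A
 * 4000-byte packet maps to slot 500 > 255 and takes the two-level path:
 * data[255] * (500 >> 8) + data[500 & 0xFF] = data[255] * 1 + data[244].
 */
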
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

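/* Note on the arithmetic (informational, not from the original source):
 * psched_ratecfg_precompute() chooses mult and shift so that
 * (len * mult) >> shift ~= len * NSEC_PER_SEC / rate_bytes_ps, giving a
 * division-free fixed-point length-to-nanoseconds conversion. The ATM
 * branch first rounds the length up to 48-byte cell payloads, each of
 * which occupies 53 bytes on the wire.
 */
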
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * a Qdisc using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif