/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
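/* For illustration, a typical u32 rule installed from user space looks
 * something like this (tc syntax, assuming a qdisc 1: already exists on eth0):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip src 10.0.0.0/8 flowid 1:2
 *
 * which creates a key node with a single 32bit key/mask pair over the IPv4
 * source address and a TC_U32_TERMINAL result pointing at class 1:2.
 */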
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/netdevice.h>
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};
struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct rcu_head		rcu;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
	struct rcu_head		rcu;
};
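/* Fold a 32bit word of the packet into a hash bucket index: the key is masked
 * with sel->hmask and shifted down by fshift, where fshift is the position of
 * the lowest set bit of the (host order) hmask; see the ffs() computation in
 * u32_change(). E.g. hmask 0x00ff0000 gives fshift 16, so the masked byte
 * folds to 0..255, which the caller then ANDs with ht->divisor.
 */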
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
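/* A u32 handle packs three fields: TC_U32_HTID() is the hash table id in the
 * upper 12 bits (TC_U32_ROOT, 0xFFF00000, selects the root table),
 * TC_U32_HASH() is the 8bit bucket within that table, and TC_U32_NODE() is
 * the 12bit key node id in the low bits. The lookup helpers below walk those
 * levels in turn.
 */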
static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	/* hgenerator is only used inside the rtnl lock, so it is safe to
	 * increment it without read-copy-update semantics.
	 */
	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
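/* All u32 classifier instances attached to the same qdisc share a single
 * tc_u_common (reached via tp->q->u32_node), so a hash table created under
 * one tcf_proto can be linked to from filters of another. u32_init() only
 * allocates the shared state for the first instance and takes a reference
 * otherwise.
 */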
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
static int u32_destroy_key(struct tcf_proto *tp,
			   struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}
/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, false);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, true);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (dev->netdev_ops->ndo_setup_tc) {
		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
		offload.cls_u32->knode.handle = handle;
		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static void u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (dev->netdev_ops->ndo_setup_tc) {
		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
		offload.cls_u32->hnode.divisor = h->divisor;
		offload.cls_u32->hnode.handle = h->handle;
		offload.cls_u32->hnode.prio = h->prio;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (dev->netdev_ops->ndo_setup_tc) {
		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
		offload.cls_u32->hnode.divisor = h->divisor;
		offload.cls_u32->hnode.handle = h->handle;
		offload.cls_u32->hnode.prio = h->prio;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static void u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (dev->netdev_ops->ndo_setup_tc) {
		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
		offload.cls_u32->knode.handle = n->handle;
		offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		offload.cls_u32->knode.val = n->val;
		offload.cls_u32->knode.mask = n->mask;
#else
		offload.cls_u32->knode.val = 0;
		offload.cls_u32->knode.mask = 0;
#endif
		offload.cls_u32->knode.sel = &n->sel;
		offload.cls_u32->knode.exts = &n->exts;
		if (n->ht_down)
			offload.cls_u32->knode.link_handle = n->ht_down->handle;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n->handle);
			call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}
static bool u32_destroy(struct tcf_proto *tp, bool force)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (!force) {
		if (root_ht) {
			if (root_ht->refcnt > 1)
				return false;
			if (root_ht->refcnt == 1) {
				if (!ht_empty(root_ht))
					return false;
			}
		}

		if (tp_c->refcnt > 1)
			return false;

		if (tp_c->refcnt == 1) {
			struct tc_u_hnode *ht;

			for (ht = rtnl_dereference(tp_c->hlist);
			     ht;
			     ht = rtnl_dereference(ht->next))
				if (!ht_empty(ht))
					return false;
		}
	}

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
	return true;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		return u32_delete_key(tp, (struct tc_u_knode *)ht);
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
	     n;
	     n = rtnl_dereference(n->next))
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
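/* Common attribute handling for both new and replaced key nodes: validate and
 * attach actions, resolve a TCA_U32_LINK target hash table (adjusting the
 * refcounts of the old and new link), bind TCA_U32_CLASSID to a class, and,
 * with CONFIG_NET_CLS_IND, resolve the TCA_U32_INDEV device name.
 */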
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
static void u32_replace_knode(struct tcf_proto *tp,
			      struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE);

	return new;
}
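/* Three cases are handled by u32_change(): replacing an existing key node
 * (done on a copy from u32_init_knode() that is RCU-swapped into place),
 * creating a new hash table when TCA_U32_DIVISOR is present, and inserting a
 * brand new key node into the table selected by TCA_U32_HASH (or the root).
 */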
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		u32_replace_hw_knode(tp, new);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = (unsigned long)ht;

		u32_replace_hw_hnode(tp, ht);
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	n->tp = tp;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		u32_replace_hw_knode(tp, n);
		*arg = (unsigned long)n;
		return 0;
	}

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
errout:
#endif

#ifdef CONFIG_CLS_U32_PERF
	free_percpu(n->pf);
#endif
	kfree(n);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    gpf)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");