/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/filter.h>
16 #include <linux/bpf.h>
18 #include <net/netlink.h>
19 #include <net/pkt_sched.h>
21 #include <linux/tc_act/tc_bpf.h>
22 #include <net/tc_act/tc_bpf.h>
24 #define BPF_TAB_MASK 15
25 #define ACT_BPF_NAME_LEN 256
28 struct bpf_prog
*filter
;
29 struct sock_filter
*bpf_ops
;
36 static int bpf_net_id
;
38 static int tcf_bpf(struct sk_buff
*skb
, const struct tc_action
*act
,
39 struct tcf_result
*res
)
41 struct tcf_bpf
*prog
= act
->priv
;
42 struct bpf_prog
*filter
;
43 int action
, filter_res
;
44 bool at_ingress
= G_TC_AT(skb
->tc_verd
) & AT_INGRESS
;
46 if (unlikely(!skb_mac_header_was_set(skb
)))
49 tcf_lastuse_update(&prog
->tcf_tm
);
50 bstats_cpu_update(this_cpu_ptr(prog
->common
.cpu_bstats
), skb
);
53 filter
= rcu_dereference(prog
->filter
);
55 __skb_push(skb
, skb
->mac_len
);
56 bpf_compute_data_end(skb
);
57 filter_res
= BPF_PROG_RUN(filter
, skb
);
58 __skb_pull(skb
, skb
->mac_len
);
60 bpf_compute_data_end(skb
);
61 filter_res
= BPF_PROG_RUN(filter
, skb
);
65 /* A BPF program may overwrite the default action opcode.
66 * Similarly as in cls_bpf, if filter_res == -1 we use the
67 * default action specified from tc.
69 * In case a different well-known TC_ACT opcode has been
70 * returned, it will overwrite the default one.
72 * For everything else that is unkown, TC_ACT_UNSPEC is
77 case TC_ACT_RECLASSIFY
:
84 qstats_drop_inc(this_cpu_ptr(prog
->common
.cpu_qstats
));
87 action
= prog
->tcf_action
;
90 action
= TC_ACT_UNSPEC
;
97 static bool tcf_bpf_is_ebpf(const struct tcf_bpf
*prog
)
99 return !prog
->bpf_ops
;
102 static int tcf_bpf_dump_bpf_info(const struct tcf_bpf
*prog
,
107 if (nla_put_u16(skb
, TCA_ACT_BPF_OPS_LEN
, prog
->bpf_num_ops
))
110 nla
= nla_reserve(skb
, TCA_ACT_BPF_OPS
, prog
->bpf_num_ops
*
111 sizeof(struct sock_filter
));
115 memcpy(nla_data(nla
), prog
->bpf_ops
, nla_len(nla
));
120 static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf
*prog
,
123 if (nla_put_u32(skb
, TCA_ACT_BPF_FD
, prog
->bpf_fd
))
126 if (prog
->bpf_name
&&
127 nla_put_string(skb
, TCA_ACT_BPF_NAME
, prog
->bpf_name
))
133 static int tcf_bpf_dump(struct sk_buff
*skb
, struct tc_action
*act
,
136 unsigned char *tp
= skb_tail_pointer(skb
);
137 struct tcf_bpf
*prog
= act
->priv
;
138 struct tc_act_bpf opt
= {
139 .index
= prog
->tcf_index
,
140 .refcnt
= prog
->tcf_refcnt
- ref
,
141 .bindcnt
= prog
->tcf_bindcnt
- bind
,
142 .action
= prog
->tcf_action
,
147 if (nla_put(skb
, TCA_ACT_BPF_PARMS
, sizeof(opt
), &opt
))
148 goto nla_put_failure
;
150 if (tcf_bpf_is_ebpf(prog
))
151 ret
= tcf_bpf_dump_ebpf_info(prog
, skb
);
153 ret
= tcf_bpf_dump_bpf_info(prog
, skb
);
155 goto nla_put_failure
;
157 tm
.install
= jiffies_to_clock_t(jiffies
- prog
->tcf_tm
.install
);
158 tm
.lastuse
= jiffies_to_clock_t(jiffies
- prog
->tcf_tm
.lastuse
);
159 tm
.firstuse
= jiffies_to_clock_t(jiffies
- prog
->tcf_tm
.firstuse
);
160 tm
.expires
= jiffies_to_clock_t(prog
->tcf_tm
.expires
);
162 if (nla_put_64bit(skb
, TCA_ACT_BPF_TM
, sizeof(tm
), &tm
,
164 goto nla_put_failure
;
173 static const struct nla_policy act_bpf_policy
[TCA_ACT_BPF_MAX
+ 1] = {
174 [TCA_ACT_BPF_PARMS
] = { .len
= sizeof(struct tc_act_bpf
) },
175 [TCA_ACT_BPF_FD
] = { .type
= NLA_U32
},
176 [TCA_ACT_BPF_NAME
] = { .type
= NLA_NUL_STRING
, .len
= ACT_BPF_NAME_LEN
},
177 [TCA_ACT_BPF_OPS_LEN
] = { .type
= NLA_U16
},
178 [TCA_ACT_BPF_OPS
] = { .type
= NLA_BINARY
,
179 .len
= sizeof(struct sock_filter
) * BPF_MAXINSNS
},
182 static int tcf_bpf_init_from_ops(struct nlattr
**tb
, struct tcf_bpf_cfg
*cfg
)
184 struct sock_filter
*bpf_ops
;
185 struct sock_fprog_kern fprog_tmp
;
187 u16 bpf_size
, bpf_num_ops
;
190 bpf_num_ops
= nla_get_u16(tb
[TCA_ACT_BPF_OPS_LEN
]);
191 if (bpf_num_ops
> BPF_MAXINSNS
|| bpf_num_ops
== 0)
194 bpf_size
= bpf_num_ops
* sizeof(*bpf_ops
);
195 if (bpf_size
!= nla_len(tb
[TCA_ACT_BPF_OPS
]))
198 bpf_ops
= kzalloc(bpf_size
, GFP_KERNEL
);
202 memcpy(bpf_ops
, nla_data(tb
[TCA_ACT_BPF_OPS
]), bpf_size
);
204 fprog_tmp
.len
= bpf_num_ops
;
205 fprog_tmp
.filter
= bpf_ops
;
207 ret
= bpf_prog_create(&fp
, &fprog_tmp
);
213 cfg
->bpf_ops
= bpf_ops
;
214 cfg
->bpf_num_ops
= bpf_num_ops
;
216 cfg
->is_ebpf
= false;
221 static int tcf_bpf_init_from_efd(struct nlattr
**tb
, struct tcf_bpf_cfg
*cfg
)
227 bpf_fd
= nla_get_u32(tb
[TCA_ACT_BPF_FD
]);
229 fp
= bpf_prog_get(bpf_fd
);
233 if (fp
->type
!= BPF_PROG_TYPE_SCHED_ACT
) {
238 if (tb
[TCA_ACT_BPF_NAME
]) {
239 name
= kmemdup(nla_data(tb
[TCA_ACT_BPF_NAME
]),
240 nla_len(tb
[TCA_ACT_BPF_NAME
]),
248 cfg
->bpf_fd
= bpf_fd
;
249 cfg
->bpf_name
= name
;
256 static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg
*cfg
)
259 bpf_prog_put(cfg
->filter
);
261 bpf_prog_destroy(cfg
->filter
);
264 kfree(cfg
->bpf_name
);
267 static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf
*prog
,
268 struct tcf_bpf_cfg
*cfg
)
270 cfg
->is_ebpf
= tcf_bpf_is_ebpf(prog
);
271 /* updates to prog->filter are prevented, since it's called either
272 * with rtnl lock or during final cleanup in rcu callback
274 cfg
->filter
= rcu_dereference_protected(prog
->filter
, 1);
276 cfg
->bpf_ops
= prog
->bpf_ops
;
277 cfg
->bpf_name
= prog
->bpf_name
;
280 static int tcf_bpf_init(struct net
*net
, struct nlattr
*nla
,
281 struct nlattr
*est
, struct tc_action
*act
,
282 int replace
, int bind
)
284 struct tc_action_net
*tn
= net_generic(net
, bpf_net_id
);
285 struct nlattr
*tb
[TCA_ACT_BPF_MAX
+ 1];
286 struct tcf_bpf_cfg cfg
, old
;
287 struct tc_act_bpf
*parm
;
288 struct tcf_bpf
*prog
;
289 bool is_bpf
, is_ebpf
;
295 ret
= nla_parse_nested(tb
, TCA_ACT_BPF_MAX
, nla
, act_bpf_policy
);
299 if (!tb
[TCA_ACT_BPF_PARMS
])
302 parm
= nla_data(tb
[TCA_ACT_BPF_PARMS
]);
304 if (!tcf_hash_check(tn
, parm
->index
, act
, bind
)) {
305 ret
= tcf_hash_create(tn
, parm
->index
, est
, act
,
306 sizeof(*prog
), bind
, true);
312 /* Don't override defaults. */
316 tcf_hash_release(act
, bind
);
321 is_bpf
= tb
[TCA_ACT_BPF_OPS_LEN
] && tb
[TCA_ACT_BPF_OPS
];
322 is_ebpf
= tb
[TCA_ACT_BPF_FD
];
324 if ((!is_bpf
&& !is_ebpf
) || (is_bpf
&& is_ebpf
)) {
329 memset(&cfg
, 0, sizeof(cfg
));
331 ret
= is_bpf
? tcf_bpf_init_from_ops(tb
, &cfg
) :
332 tcf_bpf_init_from_efd(tb
, &cfg
);
339 if (res
!= ACT_P_CREATED
)
340 tcf_bpf_prog_fill_cfg(prog
, &old
);
342 prog
->bpf_ops
= cfg
.bpf_ops
;
343 prog
->bpf_name
= cfg
.bpf_name
;
346 prog
->bpf_num_ops
= cfg
.bpf_num_ops
;
348 prog
->bpf_fd
= cfg
.bpf_fd
;
350 prog
->tcf_action
= parm
->action
;
351 rcu_assign_pointer(prog
->filter
, cfg
.filter
);
353 if (res
== ACT_P_CREATED
) {
354 tcf_hash_insert(tn
, act
);
356 /* make sure the program being replaced is no longer executing */
358 tcf_bpf_cfg_cleanup(&old
);
363 if (res
== ACT_P_CREATED
)
364 tcf_hash_cleanup(act
, est
);
369 static void tcf_bpf_cleanup(struct tc_action
*act
, int bind
)
371 struct tcf_bpf_cfg tmp
;
373 tcf_bpf_prog_fill_cfg(act
->priv
, &tmp
);
374 tcf_bpf_cfg_cleanup(&tmp
);
377 static int tcf_bpf_walker(struct net
*net
, struct sk_buff
*skb
,
378 struct netlink_callback
*cb
, int type
,
381 struct tc_action_net
*tn
= net_generic(net
, bpf_net_id
);
383 return tcf_generic_walker(tn
, skb
, cb
, type
, a
);
386 static int tcf_bpf_search(struct net
*net
, struct tc_action
*a
, u32 index
)
388 struct tc_action_net
*tn
= net_generic(net
, bpf_net_id
);
390 return tcf_hash_search(tn
, a
, index
);
393 static struct tc_action_ops act_bpf_ops __read_mostly
= {
396 .owner
= THIS_MODULE
,
398 .dump
= tcf_bpf_dump
,
399 .cleanup
= tcf_bpf_cleanup
,
400 .init
= tcf_bpf_init
,
401 .walk
= tcf_bpf_walker
,
402 .lookup
= tcf_bpf_search
,
405 static __net_init
int bpf_init_net(struct net
*net
)
407 struct tc_action_net
*tn
= net_generic(net
, bpf_net_id
);
409 return tc_action_net_init(tn
, &act_bpf_ops
, BPF_TAB_MASK
);
412 static void __net_exit
bpf_exit_net(struct net
*net
)
414 struct tc_action_net
*tn
= net_generic(net
, bpf_net_id
);
416 tc_action_net_exit(tn
);
419 static struct pernet_operations bpf_net_ops
= {
420 .init
= bpf_init_net
,
421 .exit
= bpf_exit_net
,
423 .size
= sizeof(struct tc_action_net
),
426 static int __init
bpf_init_module(void)
428 return tcf_register_action(&act_bpf_ops
, &bpf_net_ops
);
431 static void __exit
bpf_cleanup_module(void)
433 tcf_unregister_action(&act_bpf_ops
, &bpf_net_ops
);
436 module_init(bpf_init_module
);
437 module_exit(bpf_cleanup_module
);
439 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
440 MODULE_DESCRIPTION("TC BPF based action");
441 MODULE_LICENSE("GPL v2");