bpf: remove artificial bpf_skb_{load, store}_bytes buffer limitation
net/core/filter.c

/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
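
/* Example of the contract described above: a classic filter whose final
 * instruction is
 *
 *	BPF_STMT(BPF_RET | BPF_K, 64)
 *
 * makes bpf_prog_run_save_cb() return 64, so sk_filter() trims the skb to
 * at most 64 bytes via pskb_trim() and accepts it (returns 0), while a
 * program returning 0 results in -EPERM and the packet being tossed.
 */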

static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
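
/* Worked example for the conversion above: the classic extension load
 *
 *	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL)
 *
 * is not a real packet load; convert_bpf_extensions() turns it into the
 * two instructions from the SKF_AD_PROTOCOL case:
 *
 *	BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, protocol)),
 *	BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16),
 *
 * i.e. a plain context load plus an endianness fixup, with no SKF_AD_OFF
 * magic left in the eBPF image.
 */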

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *	new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K and RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}
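
/* Worked example: the single-insn classic program
 *
 *	BPF_STMT(BPF_RET | BPF_K, 0xffff)
 *
 * comes out of bpf_convert_filter() as five eBPF insns, the three-insn
 * prologue plus the two-insn RET_K mapping:
 *
 *	BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A),
 *	BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X),
 *	BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1),
 *	BPF_MOV32_IMM(BPF_REG_0, 0xffff),
 *	BPF_EXIT_INSN(),
 *
 * which is also why the !new_prog sizing pass reserves three slots for
 * the prologue before walking the instructions.
 */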

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
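
/* Example of a program this rejects: reading a scratch cell that was
 * never written,
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * fails with -EINVAL because mem[0]'s bit in memvalid is still clear
 * when the load is reached.
 */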

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
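
/* Example of a minimal program that passes all of the above: a single
 * unconditional return,
 *
 *	BPF_STMT(BPF_RET | BPF_K, 64),
 *
 * every opcode is whitelisted by chk_code_allowed(), the last (and only)
 * insn is a RET, and check_load_and_stores() finds no mem[] accesses.
 */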

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
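
/* Userspace counterpart (sketch, not part of this file): sk_attach_filter()
 * is reached via setsockopt(SO_ATTACH_FILTER), so the classic attach
 * sequence from an application looks roughly like:
 *
 *	struct sock_filter insns[] = {
 *		{ 0x06, 0, 0, 0x00000040 },	(BPF_RET | BPF_K, accept 64 bytes)
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 1,
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * after which every packet queued to fd is first run through sk_filter().
 */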

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return prog;

	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	void *from = (void *) (long) r3;
	unsigned int len = (unsigned int) r4;
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
		return -EINVAL;

	/* bpf verifier guarantees that:
	 * 'from' pointer points to bpf program stack
	 * 'len' bytes of it were initialized
	 * 'len' > 0
	 * 'skb' is a valid pointer to 'struct sk_buff'
	 *
	 * so check for invalid 'offset' and too large 'len'
	 */
	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, len, sp->buff);
	if (unlikely(!ptr))
		return -EFAULT;

	if (flags & BPF_F_RECOMPUTE_CSUM)
		skb_postpull_rcsum(skb, ptr, len);

	memcpy(ptr, from, len);

	if (ptr == sp->buff)
		/* skb_store_bits cannot return -EFAULT here */
		skb_store_bits(skb, offset, ptr, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		skb_postpush_rcsum(skb, ptr, len);

	return 0;
}

const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
	int offset = (int) r2;
	void *to = (void *)(unsigned long) r3;
	unsigned int len = (unsigned int) r4;
	void *ptr;

	if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		return -EFAULT;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
}

const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE,
};
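
/* Usage sketch from BPF program context (restricted C, the usual helper
 * wrappers assumed): copying packet bytes onto the program stack, which
 * the checks above now bound only by the BPF stack size (MAX_BPF_STACK,
 * i.e. 512 bytes) instead of an artificial smaller limit, per the commit
 * this file reflects:
 *
 *	char buf[64];
 *
 *	if (bpf_skb_load_bytes(skb, 0, buf, sizeof(buf)) < 0)
 *		return 0;
 *
 * The verifier enforces that buf points into the program stack and that
 * the length is a known constant (ARG_PTR_TO_STACK + ARG_CONST_STACK_SIZE
 * in the proto above).
 */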

static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely(flags & ~(BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits guaranteed to not return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u64 diff_size = from_size + to_size;
	__be32 *from = (__be32 *) (long) r1;
	__be32 *to = (__be32 *) (long) r3;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_STACK,
	.arg4_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
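
/* The checksum helpers above are typically combined; a NAT-style rewrite
 * of an IPv4 destination address from a BPF program would look roughly
 * like this (sketch, the offsets are placeholders the program must
 * compute itself):
 *
 *	__be32 old_ip, new_ip;
 *
 *	bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
 *	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 *			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
 *	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, 4, 0);
 *
 * sizeof(new_ip) == 4 selects the csum_replace4() path through
 * BPF_F_HDR_FIELD_MASK in the helpers above.
 */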

static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
	struct net_device *dev;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		return -ENOMEM;

	if (flags & BPF_F_INGRESS) {
		if (skb_at_tc_ingress(skb2))
			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
					   skb2->mac_len);
		return dev_forward_skb(dev, skb2);
	}

	skb2->dev = dev;
	skb_sender_cpu_clear(skb2);
	return dev_queue_xmit(skb2);
}

const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

struct redirect_info {
	u32 ifindex;
	u32 flags;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (ri->flags & BPF_F_INGRESS) {
		if (skb_at_tc_ingress(skb))
			skb_postpush_rcsum(skb, skb_mac_header(skb),
					   skb->mac_len);
		return dev_forward_skb(dev, skb);
	}

	skb->dev = dev;
	skb_sender_cpu_clear(skb);
	return dev_queue_xmit(skb);
}

const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
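
/* Note on the two redirect flavours above: bpf_clone_redirect() clones the
 * skb and forwards the copy right from helper context, leaving the original
 * packet to continue through the stack, whereas bpf_redirect() only stashes
 * ifindex/flags in the per-cpu redirect_info and returns TC_ACT_REDIRECT,
 * letting skb_do_redirect() move the original skb once the program has
 * finished, saving the clone. From a tc classifier program the latter is
 * simply:
 *
 *	return bpf_redirect(ifindex, 0);
 */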
1662
8d20aabe
DB
1663static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1664{
1665 return task_get_classid((struct sk_buff *) (unsigned long) r1);
1666}
1667
1668static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
1669 .func = bpf_get_cgroup_classid,
1670 .gpl_only = false,
1671 .ret_type = RET_INTEGER,
1672 .arg1_type = ARG_PTR_TO_CTX,
1673};
1674
c46646d0
DB
1675static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1676{
1677#ifdef CONFIG_IP_ROUTE_CLASSID
1678 const struct dst_entry *dst;
1679
1680 dst = skb_dst((struct sk_buff *) (unsigned long) r1);
1681 if (dst)
1682 return dst->tclassid;
1683#endif
1684 return 0;
1685}
1686
1687static const struct bpf_func_proto bpf_get_route_realm_proto = {
1688 .func = bpf_get_route_realm,
1689 .gpl_only = false,
1690 .ret_type = RET_INTEGER,
1691 .arg1_type = ARG_PTR_TO_CTX,
1692};
1693
4e10df9a
AS
1694static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
1695{
1696 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1697 __be16 vlan_proto = (__force __be16) r2;
1698
1699 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
1700 vlan_proto != htons(ETH_P_8021AD)))
1701 vlan_proto = htons(ETH_P_8021Q);
1702
1703 return skb_vlan_push(skb, vlan_proto, vlan_tci);
1704}
1705
1706const struct bpf_func_proto bpf_skb_vlan_push_proto = {
1707 .func = bpf_skb_vlan_push,
1708 .gpl_only = false,
1709 .ret_type = RET_INTEGER,
1710 .arg1_type = ARG_PTR_TO_CTX,
1711 .arg2_type = ARG_ANYTHING,
1712 .arg3_type = ARG_ANYTHING,
1713};
4d9c5c53 1714EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
4e10df9a
AS
1715
1716static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1717{
1718 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1719
1720 return skb_vlan_pop(skb);
1721}
1722
1723const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
1724 .func = bpf_skb_vlan_pop,
1725 .gpl_only = false,
1726 .ret_type = RET_INTEGER,
1727 .arg1_type = ARG_PTR_TO_CTX,
1728};
4d9c5c53 1729EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
4e10df9a
AS
1730
1731bool bpf_helper_changes_skb_data(void *func)
1732{
1733 if (func == bpf_skb_vlan_push)
1734 return true;
1735 if (func == bpf_skb_vlan_pop)
1736 return true;
1737 return false;
1738}
1739
c6c33454
DB
1740static unsigned short bpf_tunnel_key_af(u64 flags)
1741{
1742 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
1743}
1744
d3aa45ce
AS
1745static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
1746{
1747 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1748 struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
c6c33454
DB
1749 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
1750 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce 1751
c6c33454 1752 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6))))
7f9562a1 1753 return -EINVAL;
c6c33454
DB
1754 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags))
1755 return -EPROTO;
1756 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
1757 switch (size) {
1758 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
1759 /* Fixup deprecated structure layouts here, so we have
1760 * a common path later on.
1761 */
1762 if (ip_tunnel_info_af(info) != AF_INET)
1763 return -EINVAL;
1764 to = (struct bpf_tunnel_key *)compat;
1765 break;
1766 default:
1767 return -EINVAL;
1768 }
1769 }
d3aa45ce
AS
1770
1771 to->tunnel_id = be64_to_cpu(info->key.tun_id);
c6c33454
DB
1772 to->tunnel_tos = info->key.tos;
1773 to->tunnel_ttl = info->key.ttl;
1774
1775 if (flags & BPF_F_TUNINFO_IPV6)
1776 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
1777 sizeof(to->remote_ipv6));
1778 else
1779 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
1780
1781 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
1782 memcpy((void *)(long) r2, to, size);
d3aa45ce
AS
1783
1784 return 0;
1785}
1786
const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

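/* Illustrative sketch, not part of this file: reading receive-side
 * tunnel metadata from a collect_md tunnel device. Because arg2/arg3
 * are ARG_PTR_TO_STACK/ARG_CONST_STACK_SIZE, the key must be a
 * fixed-size object on the BPF stack. Helper declaration follows the
 * samples/bpf pattern; SEC(), TC_ACT_* and includes as in the sketches
 * above.
 */
static int (*bpf_skb_get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

SEC("classifier")
int tun_rx(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	/* Fails with -EINVAL when no tunnel metadata is attached. */
	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;

	/* e.g. steer flows by VNI */
	skb->mark = (__u32) key.tunnel_id;
	return TC_ACT_OK;
}
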
static struct metadata_dst __percpu *md_dst;

static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (struct bpf_tunnel_key *)compat;
			break;
		default:
			return -EINVAL;
		}
	}

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY;
	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
	}

	return 0;
}

const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

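/* Illustrative sketch, not part of this file: setting transmit tunnel
 * metadata and redirecting into a collect_md vxlan device. The ifindex
 * (4) and addresses are made-up example values; declarations and
 * includes as in the sketches above. Note remote_ipv4 is supplied in
 * host byte order, since the helper applies cpu_to_be32() itself.
 */
static int (*bpf_skb_set_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_set_tunnel_key;
static int (*bpf_redirect)(int ifindex, int flags) =
	(void *) BPF_FUNC_redirect;

SEC("action")
int tun_tx(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id	= 42,			/* VNI */
		.remote_ipv4	= 0x0a000001,		/* 10.0.0.1, host order */
		.tunnel_ttl	= 64,
	};

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;
	return bpf_redirect(4 /* assumed vxlan0 ifindex */, 0);
}
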
static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
{
	if (!md_dst) {
		/* A race is not possible, since this is called from the
		 * verifier, which holds the verifier mutex.
		 */
		md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}
	return &bpf_skb_set_tunnel_key_proto;
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_key_proto();
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	default:
		return sk_filter_func_proto(func_id);
	}
}

static bool __is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	/* all __sk_buff fields are __u32 */
	if (size != 4)
		return false;

	return true;
}

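/* For illustration: under the checks above, a 4-byte aligned context
 * access such as
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 *		    offsetof(struct __sk_buff, len))
 *
 * passes, while a 2-byte load from the same offset, or a 4-byte load
 * from offset 2, is rejected before convert_ctx_access() is reached.
 */
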
static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type)
{
	if (off == offsetof(struct __sk_buff, tc_classid))
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size, type);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type)
{
	if (off == offsetof(struct __sk_buff, tc_classid))
		return type == BPF_WRITE;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}
	return __is_valid_access(off, size, type);
}

static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      dst_reg, src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, hash):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, hash));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);

		prog->cb_access = 1;
		ctx_off -= offsetof(struct __sk_buff, cb[0]);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		ctx_off -= offsetof(struct __sk_buff, tc_classid);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
		WARN_ON(type != BPF_WRITE);
		*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		break;
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(dst_reg, 0);
		break;
#endif
	}

	return insn - insn_buf;
}

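/* For illustration: given the table above, a verifier-accepted program
 * instruction
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * is rewritten into a load from the real socket buffer
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, mark))
 *
 * whereas fields without a stable 4-byte home in struct sk_buff, such
 * as pkt_type or vlan_present, expand into the multi-instruction
 * sequences emitted by convert_skb_access().
 */
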
static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static const struct bpf_verifier_ops tc_cls_act_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static struct bpf_prog_type_list sk_filter_type __read_mostly = {
	.ops	= &sk_filter_ops,
	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_CLS,
};

static struct bpf_prog_type_list sched_act_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_ACT,
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);
	bpf_register_prog_type(&sched_act_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

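/* Illustrative sketch, not part of this file: the userspace call that
 * reaches sk_detach_filter() via sock_setsockopt(). SO_DETACH_FILTER
 * comes from the asm-generic socket header; optval is not interpreted
 * for this option, but sock_setsockopt() still reads an int from it,
 * so it must point at valid memory.
 */
#include <sys/socket.h>
#include <asm/socket.h>		/* SO_DETACH_FILTER */

static int detach_socket_filter(int fd)
{
	int dummy = 0;

	return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
			  &dummy, sizeof(dummy));
}
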
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that was originally attached,
	 * so no conversion/decoding is needed anymore. eBPF programs
	 * that have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
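
/* Illustrative sketch, not part of this file: the two-step userspace
 * pattern served by sk_get_filter() through getsockopt(SO_GET_FILTER).
 * Note the quirk documented above: optlen counts filter blocks, not
 * bytes. SO_GET_FILTER aliases SO_ATTACH_FILTER in the asm-generic
 * socket header.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <asm/socket.h>		/* SO_GET_FILTER */
#include <linux/filter.h>	/* struct sock_filter */

static struct sock_filter *dump_classic_filter(int fd, socklen_t *nblocks)
{
	struct sock_filter *insns;
	socklen_t len = 0;

	/* First call: len == 0 just queries the number of blocks. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return NULL;	/* e.g. EACCES for eBPF without orig_prog */

	insns = calloc(len, sizeof(*insns));
	if (!insns)
		return NULL;

	/* Second call: copy out the originally attached filter. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len) < 0) {
		free(insns);
		return NULL;
	}

	*nblocks = len;
	return insns;
}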