net: filter: make register naming more comprehensible
net/core/filter.c
1 /*
2 * Linux Socket Filter - Kernel level socket filtering
3 *
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
6 *
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8 *
9 * Authors:
10 *
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
22 */
23
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/fcntl.h>
28 #include <linux/socket.h>
29 #include <linux/in.h>
30 #include <linux/inet.h>
31 #include <linux/netdevice.h>
32 #include <linux/if_packet.h>
33 #include <linux/gfp.h>
34 #include <net/ip.h>
35 #include <net/protocol.h>
36 #include <net/netlink.h>
37 #include <linux/skbuff.h>
38 #include <net/sock.h>
39 #include <linux/errno.h>
40 #include <linux/timer.h>
41 #include <asm/uaccess.h>
42 #include <asm/unaligned.h>
43 #include <linux/filter.h>
44 #include <linux/ratelimit.h>
45 #include <linux/seccomp.h>
46 #include <linux/if_vlan.h>
47
48 /* Registers */
49 #define R0 regs[BPF_REG_0]
50 #define R1 regs[BPF_REG_1]
51 #define R2 regs[BPF_REG_2]
52 #define R3 regs[BPF_REG_3]
53 #define R4 regs[BPF_REG_4]
54 #define R5 regs[BPF_REG_5]
55 #define R6 regs[BPF_REG_6]
56 #define R7 regs[BPF_REG_7]
57 #define R8 regs[BPF_REG_8]
58 #define R9 regs[BPF_REG_9]
59 #define R10 regs[BPF_REG_10]
60
61 /* Named registers */
62 #define A regs[insn->a_reg]
63 #define X regs[insn->x_reg]
64 #define FP regs[BPF_REG_FP]
65 #define ARG1 regs[BPF_REG_ARG1]
66 #define CTX regs[BPF_REG_CTX]
67 #define K insn->imm
68
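/* Illustrative sketch: with the macros above, the interpreter below executes
 * e.g. a BPF_ALU64 | BPF_ADD | BPF_X insn with a_reg = BPF_REG_2 and
 * x_reg = BPF_REG_3 roughly as
 *
 *	A = A + X;	// i.e. regs[insn->a_reg] += regs[insn->x_reg];
 *
 * while K expands to the 32-bit immediate insn->imm rather than a register.
 */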
69 /* No hurry in this branch
70 *
71 * Exported for the bpf jit load helper.
72 */
73 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
74 {
75 u8 *ptr = NULL;
76
77 if (k >= SKF_NET_OFF)
78 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
79 else if (k >= SKF_LL_OFF)
80 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
81
82 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
83 return ptr;
84 return NULL;
85 }
86
87 static inline void *load_pointer(const struct sk_buff *skb, int k,
88 unsigned int size, void *buffer)
89 {
90 if (k >= 0)
91 return skb_header_pointer(skb, k, size, buffer);
92 return bpf_internal_load_pointer_neg_helper(skb, k, size);
93 }
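/* Example (illustrative only): a classic BPF "ld [SKF_NET_OFF + 9]" reaches
 * load_pointer() with a negative k, e.g.
 *
 *	u8 buf[1];
 *	void *p = load_pointer(skb, SKF_NET_OFF + 9, 1, buf);
 *
 * which resolves to skb_network_header(skb) + 9 (the protocol field for an
 * IPv4 header), while non-negative offsets take the skb_header_pointer() path.
 */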
94
95 /**
96 * sk_filter - run a packet through a socket filter
97 * @sk: sock associated with &sk_buff
98 * @skb: buffer to filter
99 *
100 * Run the filter code and then cut skb->data to the correct size returned
101 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
102 * smaller than pkt_len we keep the whole skb->data. This is the socket level
103 * wrapper to sk_run_filter. It returns 0 if the packet should
104 * be accepted or -EPERM if the packet should be tossed.
105 *
106 */
107 int sk_filter(struct sock *sk, struct sk_buff *skb)
108 {
109 int err;
110 struct sk_filter *filter;
111
112 /*
113 * If the skb was allocated from pfmemalloc reserves, only
114 * allow SOCK_MEMALLOC sockets to use it as this socket is
115 * helping free memory
116 */
117 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
118 return -ENOMEM;
119
120 err = security_sock_rcv_skb(sk, skb);
121 if (err)
122 return err;
123
124 rcu_read_lock();
125 filter = rcu_dereference(sk->sk_filter);
126 if (filter) {
127 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
128
129 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
130 }
131 rcu_read_unlock();
132
133 return err;
134 }
135 EXPORT_SYMBOL(sk_filter);
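/* Typical caller pattern (sketch of a receive path outside this file): run
 * the attached filter and drop the packet when sk_filter() fails, e.g.
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);
 *		return;
 *	}
 */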
136
137 /* Base function for offset calculation. Needs to go into .text section,
138 * therefore keeping it non-static as well; will also be used by JITs
139 * anyway later on, so do not let the compiler omit it.
140 */
141 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
142 {
143 return 0;
144 }
145
146 /**
147 * __sk_run_filter - run a filter on a given context
148 * @ctx: buffer to run the filter on
149 * @insn: filter to apply
150 *
151 * Decode and apply filter instructions to the skb->data. Return length to
152 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
153 * array of filter instructions.
154 */
155 unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
156 {
157 u64 stack[MAX_BPF_STACK / sizeof(u64)];
158 u64 regs[MAX_BPF_REG], tmp;
159 static const void *jumptable[256] = {
160 [0 ... 255] = &&default_label,
161 /* Now overwrite non-defaults ... */
162 #define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
163 DL(ALU, ADD, X),
164 DL(ALU, ADD, K),
165 DL(ALU, SUB, X),
166 DL(ALU, SUB, K),
167 DL(ALU, AND, X),
168 DL(ALU, AND, K),
169 DL(ALU, OR, X),
170 DL(ALU, OR, K),
171 DL(ALU, LSH, X),
172 DL(ALU, LSH, K),
173 DL(ALU, RSH, X),
174 DL(ALU, RSH, K),
175 DL(ALU, XOR, X),
176 DL(ALU, XOR, K),
177 DL(ALU, MUL, X),
178 DL(ALU, MUL, K),
179 DL(ALU, MOV, X),
180 DL(ALU, MOV, K),
181 DL(ALU, DIV, X),
182 DL(ALU, DIV, K),
183 DL(ALU, MOD, X),
184 DL(ALU, MOD, K),
185 DL(ALU, NEG, 0),
186 DL(ALU, END, TO_BE),
187 DL(ALU, END, TO_LE),
188 DL(ALU64, ADD, X),
189 DL(ALU64, ADD, K),
190 DL(ALU64, SUB, X),
191 DL(ALU64, SUB, K),
192 DL(ALU64, AND, X),
193 DL(ALU64, AND, K),
194 DL(ALU64, OR, X),
195 DL(ALU64, OR, K),
196 DL(ALU64, LSH, X),
197 DL(ALU64, LSH, K),
198 DL(ALU64, RSH, X),
199 DL(ALU64, RSH, K),
200 DL(ALU64, XOR, X),
201 DL(ALU64, XOR, K),
202 DL(ALU64, MUL, X),
203 DL(ALU64, MUL, K),
204 DL(ALU64, MOV, X),
205 DL(ALU64, MOV, K),
206 DL(ALU64, ARSH, X),
207 DL(ALU64, ARSH, K),
208 DL(ALU64, DIV, X),
209 DL(ALU64, DIV, K),
210 DL(ALU64, MOD, X),
211 DL(ALU64, MOD, K),
212 DL(ALU64, NEG, 0),
213 DL(JMP, CALL, 0),
214 DL(JMP, JA, 0),
215 DL(JMP, JEQ, X),
216 DL(JMP, JEQ, K),
217 DL(JMP, JNE, X),
218 DL(JMP, JNE, K),
219 DL(JMP, JGT, X),
220 DL(JMP, JGT, K),
221 DL(JMP, JGE, X),
222 DL(JMP, JGE, K),
223 DL(JMP, JSGT, X),
224 DL(JMP, JSGT, K),
225 DL(JMP, JSGE, X),
226 DL(JMP, JSGE, K),
227 DL(JMP, JSET, X),
228 DL(JMP, JSET, K),
229 DL(JMP, EXIT, 0),
230 DL(STX, MEM, B),
231 DL(STX, MEM, H),
232 DL(STX, MEM, W),
233 DL(STX, MEM, DW),
234 DL(STX, XADD, W),
235 DL(STX, XADD, DW),
236 DL(ST, MEM, B),
237 DL(ST, MEM, H),
238 DL(ST, MEM, W),
239 DL(ST, MEM, DW),
240 DL(LDX, MEM, B),
241 DL(LDX, MEM, H),
242 DL(LDX, MEM, W),
243 DL(LDX, MEM, DW),
244 DL(LD, ABS, W),
245 DL(LD, ABS, H),
246 DL(LD, ABS, B),
247 DL(LD, IND, W),
248 DL(LD, IND, H),
249 DL(LD, IND, B),
250 #undef DL
251 };
252 void *ptr;
253 int off;
254
255 #define CONT ({ insn++; goto select_insn; })
256 #define CONT_JMP ({ insn++; goto select_insn; })
257
258 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
259 ARG1 = (u64) (unsigned long) ctx;
260
261 /* Registers for user BPF programs need to be reset first. */
262 regs[BPF_REG_A] = 0;
263 regs[BPF_REG_X] = 0;
264
265 select_insn:
266 goto *jumptable[insn->code];
267
268 /* ALU */
269 #define ALU(OPCODE, OP) \
270 ALU64_##OPCODE##_X: \
271 A = A OP X; \
272 CONT; \
273 ALU_##OPCODE##_X: \
274 A = (u32) A OP (u32) X; \
275 CONT; \
276 ALU64_##OPCODE##_K: \
277 A = A OP K; \
278 CONT; \
279 ALU_##OPCODE##_K: \
280 A = (u32) A OP (u32) K; \
281 CONT;
282
283 ALU(ADD, +)
284 ALU(SUB, -)
285 ALU(AND, &)
286 ALU(OR, |)
287 ALU(LSH, <<)
288 ALU(RSH, >>)
289 ALU(XOR, ^)
290 ALU(MUL, *)
291 #undef ALU
292 ALU_NEG_0:
293 A = (u32) -A;
294 CONT;
295 ALU64_NEG_0:
296 A = -A;
297 CONT;
298 ALU_MOV_X:
299 A = (u32) X;
300 CONT;
301 ALU_MOV_K:
302 A = (u32) K;
303 CONT;
304 ALU64_MOV_X:
305 A = X;
306 CONT;
307 ALU64_MOV_K:
308 A = K;
309 CONT;
310 ALU64_ARSH_X:
311 (*(s64 *) &A) >>= X;
312 CONT;
313 ALU64_ARSH_K:
314 (*(s64 *) &A) >>= K;
315 CONT;
316 ALU64_MOD_X:
317 if (unlikely(X == 0))
318 return 0;
319 tmp = A;
320 A = do_div(tmp, X);
321 CONT;
322 ALU_MOD_X:
323 if (unlikely(X == 0))
324 return 0;
325 tmp = (u32) A;
326 A = do_div(tmp, (u32) X);
327 CONT;
328 ALU64_MOD_K:
329 tmp = A;
330 A = do_div(tmp, K);
331 CONT;
332 ALU_MOD_K:
333 tmp = (u32) A;
334 A = do_div(tmp, (u32) K);
335 CONT;
336 ALU64_DIV_X:
337 if (unlikely(X == 0))
338 return 0;
339 do_div(A, X);
340 CONT;
341 ALU_DIV_X:
342 if (unlikely(X == 0))
343 return 0;
344 tmp = (u32) A;
345 do_div(tmp, (u32) X);
346 A = (u32) tmp;
347 CONT;
348 ALU64_DIV_K:
349 do_div(A, K);
350 CONT;
351 ALU_DIV_K:
352 tmp = (u32) A;
353 do_div(tmp, (u32) K);
354 A = (u32) tmp;
355 CONT;
356 ALU_END_TO_BE:
357 switch (K) {
358 case 16:
359 A = (__force u16) cpu_to_be16(A);
360 break;
361 case 32:
362 A = (__force u32) cpu_to_be32(A);
363 break;
364 case 64:
365 A = (__force u64) cpu_to_be64(A);
366 break;
367 }
368 CONT;
369 ALU_END_TO_LE:
370 switch (K) {
371 case 16:
372 A = (__force u16) cpu_to_le16(A);
373 break;
374 case 32:
375 A = (__force u32) cpu_to_le32(A);
376 break;
377 case 64:
378 A = (__force u64) cpu_to_le64(A);
379 break;
380 }
381 CONT;
382
383 /* CALL */
384 JMP_CALL_0:
385 /* Function call scratches R1-R5 registers, preserves R6-R9,
386 * and stores return value into R0.
387 */
388 R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5);
389 CONT;
390
391 /* JMP */
392 JMP_JA_0:
393 insn += insn->off;
394 CONT;
395 JMP_JEQ_X:
396 if (A == X) {
397 insn += insn->off;
398 CONT_JMP;
399 }
400 CONT;
401 JMP_JEQ_K:
402 if (A == K) {
403 insn += insn->off;
404 CONT_JMP;
405 }
406 CONT;
407 JMP_JNE_X:
408 if (A != X) {
409 insn += insn->off;
410 CONT_JMP;
411 }
412 CONT;
413 JMP_JNE_K:
414 if (A != K) {
415 insn += insn->off;
416 CONT_JMP;
417 }
418 CONT;
419 JMP_JGT_X:
420 if (A > X) {
421 insn += insn->off;
422 CONT_JMP;
423 }
424 CONT;
425 JMP_JGT_K:
426 if (A > K) {
427 insn += insn->off;
428 CONT_JMP;
429 }
430 CONT;
431 JMP_JGE_X:
432 if (A >= X) {
433 insn += insn->off;
434 CONT_JMP;
435 }
436 CONT;
437 JMP_JGE_K:
438 if (A >= K) {
439 insn += insn->off;
440 CONT_JMP;
441 }
442 CONT;
443 JMP_JSGT_X:
444 if (((s64) A) > ((s64) X)) {
445 insn += insn->off;
446 CONT_JMP;
447 }
448 CONT;
449 JMP_JSGT_K:
450 if (((s64) A) > ((s64) K)) {
451 insn += insn->off;
452 CONT_JMP;
453 }
454 CONT;
455 JMP_JSGE_X:
456 if (((s64) A) >= ((s64) X)) {
457 insn += insn->off;
458 CONT_JMP;
459 }
460 CONT;
461 JMP_JSGE_K:
462 if (((s64) A) >= ((s64) K)) {
463 insn += insn->off;
464 CONT_JMP;
465 }
466 CONT;
467 JMP_JSET_X:
468 if (A & X) {
469 insn += insn->off;
470 CONT_JMP;
471 }
472 CONT;
473 JMP_JSET_K:
474 if (A & K) {
475 insn += insn->off;
476 CONT_JMP;
477 }
478 CONT;
479 JMP_EXIT_0:
480 return R0;
481
482 /* STX, ST and LDX */
483 #define LDST(SIZEOP, SIZE) \
484 STX_MEM_##SIZEOP: \
485 *(SIZE *)(unsigned long) (A + insn->off) = X; \
486 CONT; \
487 ST_MEM_##SIZEOP: \
488 *(SIZE *)(unsigned long) (A + insn->off) = K; \
489 CONT; \
490 LDX_MEM_##SIZEOP: \
491 A = *(SIZE *)(unsigned long) (X + insn->off); \
492 CONT;
493
494 LDST(B, u8)
495 LDST(H, u16)
496 LDST(W, u32)
497 LDST(DW, u64)
498 #undef LDST
499 STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
500 atomic_add((u32) X, (atomic_t *)(unsigned long)
501 (A + insn->off));
502 CONT;
503 STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
504 atomic64_add((u64) X, (atomic64_t *)(unsigned long)
505 (A + insn->off));
506 CONT;
507 LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
508 off = K;
509 load_word:
510 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only appear
511 * in programs where ctx == skb. All programs keep 'ctx'
512 * in regs[BPF_REG_CTX] == R6; sk_convert_filter() saves it
513 * in R6, and the internal BPF verifier will check that
514 * R6 == ctx.
515 *
516 * BPF_ABS and BPF_IND are wrappers of function calls, so
517 * they scratch R1-R5 registers, preserve R6-R9, and store
518 * return value into R0.
519 *
520 * Implicit input:
521 * ctx
522 *
523 * Explicit input:
524 * X == any register
525 * K == 32-bit immediate
526 *
527 * Output:
528 * R0 - 8/16/32-bit skb data converted to cpu endianness
529 */
530 ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
531 if (likely(ptr != NULL)) {
532 R0 = get_unaligned_be32(ptr);
533 CONT;
534 }
535 return 0;
536 LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
537 off = K;
538 load_half:
539 ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
540 if (likely(ptr != NULL)) {
541 R0 = get_unaligned_be16(ptr);
542 CONT;
543 }
544 return 0;
545 LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */
546 off = K;
547 load_byte:
548 ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
549 if (likely(ptr != NULL)) {
550 R0 = *(u8 *)ptr;
551 CONT;
552 }
553 return 0;
554 LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
555 off = K + X;
556 goto load_word;
557 LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
558 off = K + X;
559 goto load_half;
560 LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */
561 off = K + X;
562 goto load_byte;
563
564 default_label:
565 /* If we ever reach this, we have a bug somewhere. */
566 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
567 return 0;
568 }
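/* Minimal example of a program in the internal representation (a sketch,
 * assuming the struct sock_filter_int field order { code, a_reg, x_reg,
 * off, imm }): load skb->len into R0 and return it.
 *
 *	struct sock_filter_int prog[] = {
 *		// R0 = *(u32 *)(R1 + offsetof(struct sk_buff, len))
 *		{ BPF_LDX | BPF_MEM | BPF_W, BPF_REG_0, BPF_REG_ARG1,
 *		  offsetof(struct sk_buff, len), 0 },
 *		// return R0
 *		{ BPF_JMP | BPF_EXIT, 0, 0, 0, 0 },
 *	};
 *
 * __sk_run_filter(skb, prog) then returns skb->len, with R1 holding ctx
 * on entry as set up above.
 */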
569
570 u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
571 const struct sock_filter_int *insni)
572 __attribute__ ((alias ("__sk_run_filter")));
573
574 u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
575 const struct sock_filter_int *insni)
576 __attribute__ ((alias ("__sk_run_filter")));
577 EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
578
579 /* Helper to find the offset of pkt_type in the sk_buff structure. We want
580 * to make sure it's still a 3-bit field starting at a byte boundary;
581 * taken from arch/x86/net/bpf_jit_comp.c.
582 */
583 #define PKT_TYPE_MAX 7
584 static unsigned int pkt_type_offset(void)
585 {
586 struct sk_buff skb_probe = { .pkt_type = ~0, };
587 u8 *ct = (u8 *) &skb_probe;
588 unsigned int off;
589
590 for (off = 0; off < sizeof(struct sk_buff); off++) {
591 if (ct[off] == PKT_TYPE_MAX)
592 return off;
593 }
594
595 pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
596 return -1;
597 }
598
599 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
600 {
601 struct sk_buff *skb = (struct sk_buff *)(long) ctx;
602
603 return __skb_get_poff(skb);
604 }
605
606 static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
607 {
608 struct sk_buff *skb = (struct sk_buff *)(long) ctx;
609 struct nlattr *nla;
610
611 if (skb_is_nonlinear(skb))
612 return 0;
613
614 if (skb->len < sizeof(struct nlattr))
615 return 0;
616
617 if (a > skb->len - sizeof(struct nlattr))
618 return 0;
619
620 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
621 if (nla)
622 return (void *) nla - (void *) skb->data;
623
624 return 0;
625 }
626
627 static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
628 {
629 struct sk_buff *skb = (struct sk_buff *)(long) ctx;
630 struct nlattr *nla;
631
632 if (skb_is_nonlinear(skb))
633 return 0;
634
635 if (skb->len < sizeof(struct nlattr))
636 return 0;
637
638 if (a > skb->len - sizeof(struct nlattr))
639 return 0;
640
641 nla = (struct nlattr *) &skb->data[a];
642 if (nla->nla_len > skb->len - a)
643 return 0;
644
645 nla = nla_find_nested(nla, x);
646 if (nla)
647 return (void *) nla - (void *) skb->data;
648
649 return 0;
650 }
651
652 static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
653 {
654 return raw_smp_processor_id();
655 }
656
657 /* note that this only generates 32-bit random numbers */
658 static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
659 {
660 return (u64)prandom_u32();
661 }
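/* All of the helpers above follow the internal BPF calling convention
 * (sketch): arguments arrive in R1-R5 and the result is returned in R0,
 * so a BPF_JMP | BPF_CALL insn with imm = __skb_get_pay_offset - __bpf_call_base
 * is executed by the interpreter as roughly
 *
 *	R0 = __skb_get_pay_offset(R1, R2, R3, R4, R5);	// R1 == ctx
 */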
662
663 static bool convert_bpf_extensions(struct sock_filter *fp,
664 struct sock_filter_int **insnp)
665 {
666 struct sock_filter_int *insn = *insnp;
667
668 switch (fp->k) {
669 case SKF_AD_OFF + SKF_AD_PROTOCOL:
670 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
671
672 insn->code = BPF_LDX | BPF_MEM | BPF_H;
673 insn->a_reg = BPF_REG_A;
674 insn->x_reg = BPF_REG_CTX;
675 insn->off = offsetof(struct sk_buff, protocol);
676 insn++;
677
678 /* A = ntohs(A) [emitting a nop or swap16] */
679 insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
680 insn->a_reg = BPF_REG_A;
681 insn->imm = 16;
682 break;
683
684 case SKF_AD_OFF + SKF_AD_PKTTYPE:
685 insn->code = BPF_LDX | BPF_MEM | BPF_B;
686 insn->a_reg = BPF_REG_A;
687 insn->x_reg = BPF_REG_CTX;
688 insn->off = pkt_type_offset();
689 if (insn->off < 0)
690 return false;
691 insn++;
692
693 insn->code = BPF_ALU | BPF_AND | BPF_K;
694 insn->a_reg = BPF_REG_A;
695 insn->imm = PKT_TYPE_MAX;
696 break;
697
698 case SKF_AD_OFF + SKF_AD_IFINDEX:
699 case SKF_AD_OFF + SKF_AD_HATYPE:
700 if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
701 insn->code = BPF_LDX | BPF_MEM | BPF_DW;
702 else
703 insn->code = BPF_LDX | BPF_MEM | BPF_W;
704 insn->a_reg = BPF_REG_TMP;
705 insn->x_reg = BPF_REG_CTX;
706 insn->off = offsetof(struct sk_buff, dev);
707 insn++;
708
709 insn->code = BPF_JMP | BPF_JNE | BPF_K;
710 insn->a_reg = BPF_REG_TMP;
711 insn->imm = 0;
712 insn->off = 1;
713 insn++;
714
715 insn->code = BPF_JMP | BPF_EXIT;
716 insn++;
717
718 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
719 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
720
721 insn->a_reg = BPF_REG_A;
722 insn->x_reg = BPF_REG_TMP;
723
724 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
725 insn->code = BPF_LDX | BPF_MEM | BPF_W;
726 insn->off = offsetof(struct net_device, ifindex);
727 } else {
728 insn->code = BPF_LDX | BPF_MEM | BPF_H;
729 insn->off = offsetof(struct net_device, type);
730 }
731 break;
732
733 case SKF_AD_OFF + SKF_AD_MARK:
734 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
735
736 insn->code = BPF_LDX | BPF_MEM | BPF_W;
737 insn->a_reg = BPF_REG_A;
738 insn->x_reg = BPF_REG_CTX;
739 insn->off = offsetof(struct sk_buff, mark);
740 break;
741
742 case SKF_AD_OFF + SKF_AD_RXHASH:
743 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
744
745 insn->code = BPF_LDX | BPF_MEM | BPF_W;
746 insn->a_reg = BPF_REG_A;
747 insn->x_reg = BPF_REG_CTX;
748 insn->off = offsetof(struct sk_buff, hash);
749 break;
750
751 case SKF_AD_OFF + SKF_AD_QUEUE:
752 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
753
754 insn->code = BPF_LDX | BPF_MEM | BPF_H;
755 insn->a_reg = BPF_REG_A;
756 insn->x_reg = BPF_REG_CTX;
757 insn->off = offsetof(struct sk_buff, queue_mapping);
758 break;
759
760 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
761 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
762 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
763
764 insn->code = BPF_LDX | BPF_MEM | BPF_H;
765 insn->a_reg = BPF_REG_A;
766 insn->x_reg = BPF_REG_CTX;
767 insn->off = offsetof(struct sk_buff, vlan_tci);
768 insn++;
769
770 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
771
772 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
773 insn->code = BPF_ALU | BPF_AND | BPF_K;
774 insn->a_reg = BPF_REG_A;
775 insn->imm = ~VLAN_TAG_PRESENT;
776 } else {
777 insn->code = BPF_ALU | BPF_RSH | BPF_K;
778 insn->a_reg = BPF_REG_A;
779 insn->imm = 12;
780 insn++;
781
782 insn->code = BPF_ALU | BPF_AND | BPF_K;
783 insn->a_reg = BPF_REG_A;
784 insn->imm = 1;
785 }
786 break;
787
788 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
789 case SKF_AD_OFF + SKF_AD_NLATTR:
790 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
791 case SKF_AD_OFF + SKF_AD_CPU:
792 case SKF_AD_OFF + SKF_AD_RANDOM:
793 /* arg1 = ctx */
794 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
795 insn->a_reg = BPF_REG_ARG1;
796 insn->x_reg = BPF_REG_CTX;
797 insn++;
798
799 /* arg2 = A */
800 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
801 insn->a_reg = BPF_REG_ARG2;
802 insn->x_reg = BPF_REG_A;
803 insn++;
804
805 /* arg3 = X */
806 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
807 insn->a_reg = BPF_REG_ARG3;
808 insn->x_reg = BPF_REG_X;
809 insn++;
810
811 /* Emit call(ctx, arg2=A, arg3=X) */
812 insn->code = BPF_JMP | BPF_CALL;
813 switch (fp->k) {
814 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
815 insn->imm = __skb_get_pay_offset - __bpf_call_base;
816 break;
817 case SKF_AD_OFF + SKF_AD_NLATTR:
818 insn->imm = __skb_get_nlattr - __bpf_call_base;
819 break;
820 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
821 insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
822 break;
823 case SKF_AD_OFF + SKF_AD_CPU:
824 insn->imm = __get_raw_cpu_id - __bpf_call_base;
825 break;
826 case SKF_AD_OFF + SKF_AD_RANDOM:
827 insn->imm = __get_random_u32 - __bpf_call_base;
828 break;
829 }
830 break;
831
832 case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
833 insn->code = BPF_ALU | BPF_XOR | BPF_X;
834 insn->a_reg = BPF_REG_A;
835 insn->x_reg = BPF_REG_X;
836 break;
837
838 default:
839 /* This is just a dummy call to avoid letting the compiler
840 * evict __bpf_call_base() as an optimization. Placed here
841 * where no-one bothers.
842 */
843 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
844 return false;
845 }
846
847 *insnp = insn;
848 return true;
849 }
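/* Conversion example (sketch): the classic extension load
 *
 *	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL)
 *
 * is rewritten by the SKF_AD_PROTOCOL case above into two internal insns,
 * roughly:
 *
 *	A = *(u16 *)(CTX + offsetof(struct sk_buff, protocol));
 *	A = ntohs(A);	// BPF_ALU | BPF_END | BPF_FROM_BE, imm = 16
 */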
850
851 /**
852 * sk_convert_filter - convert filter program
853 * @prog: the user passed filter program
854 * @len: the length of the user passed filter program
855 * @new_prog: buffer where converted program will be stored
856 * @new_len: pointer to store length of converted program
857 *
858 * Remap 'sock_filter' style BPF instruction set to 'sock_filter_int' style.
859 * Conversion workflow:
860 *
861 * 1) First pass for calculating the new program length:
862 * sk_convert_filter(old_prog, old_len, NULL, &new_len)
863 *
864 * 2) 2nd pass to remap, which itself runs in two passes: the 1st
865 * finds new jump offsets, the 2nd does the remapping:
866 * new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
867 * sk_convert_filter(old_prog, old_len, new_prog, &new_len);
868 *
869 * User BPF's register A is mapped to our BPF register 6, user BPF
870 * register X is mapped to BPF register 7; frame pointer is always
871 * register 10; Context 'void *ctx' is stored in register 1, that is,
872 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
873 * ctx == 'struct seccomp_data *'.
874 */
875 int sk_convert_filter(struct sock_filter *prog, int len,
876 struct sock_filter_int *new_prog, int *new_len)
877 {
878 int new_flen = 0, pass = 0, target, i;
879 struct sock_filter_int *new_insn;
880 struct sock_filter *fp;
881 int *addrs = NULL;
882 u8 bpf_src;
883
884 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
885 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
886
887 if (len <= 0 || len >= BPF_MAXINSNS)
888 return -EINVAL;
889
890 if (new_prog) {
891 addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
892 if (!addrs)
893 return -ENOMEM;
894 }
895
896 do_pass:
897 new_insn = new_prog;
898 fp = prog;
899
900 if (new_insn) {
901 new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
902 new_insn->a_reg = BPF_REG_CTX;
903 new_insn->x_reg = BPF_REG_ARG1;
904 }
905 new_insn++;
906
907 for (i = 0; i < len; fp++, i++) {
908 struct sock_filter_int tmp_insns[6] = { };
909 struct sock_filter_int *insn = tmp_insns;
910
911 if (addrs)
912 addrs[i] = new_insn - new_prog;
913
914 switch (fp->code) {
915 /* All arithmetic insns and skb loads map as-is. */
916 case BPF_ALU | BPF_ADD | BPF_X:
917 case BPF_ALU | BPF_ADD | BPF_K:
918 case BPF_ALU | BPF_SUB | BPF_X:
919 case BPF_ALU | BPF_SUB | BPF_K:
920 case BPF_ALU | BPF_AND | BPF_X:
921 case BPF_ALU | BPF_AND | BPF_K:
922 case BPF_ALU | BPF_OR | BPF_X:
923 case BPF_ALU | BPF_OR | BPF_K:
924 case BPF_ALU | BPF_LSH | BPF_X:
925 case BPF_ALU | BPF_LSH | BPF_K:
926 case BPF_ALU | BPF_RSH | BPF_X:
927 case BPF_ALU | BPF_RSH | BPF_K:
928 case BPF_ALU | BPF_XOR | BPF_X:
929 case BPF_ALU | BPF_XOR | BPF_K:
930 case BPF_ALU | BPF_MUL | BPF_X:
931 case BPF_ALU | BPF_MUL | BPF_K:
932 case BPF_ALU | BPF_DIV | BPF_X:
933 case BPF_ALU | BPF_DIV | BPF_K:
934 case BPF_ALU | BPF_MOD | BPF_X:
935 case BPF_ALU | BPF_MOD | BPF_K:
936 case BPF_ALU | BPF_NEG:
937 case BPF_LD | BPF_ABS | BPF_W:
938 case BPF_LD | BPF_ABS | BPF_H:
939 case BPF_LD | BPF_ABS | BPF_B:
940 case BPF_LD | BPF_IND | BPF_W:
941 case BPF_LD | BPF_IND | BPF_H:
942 case BPF_LD | BPF_IND | BPF_B:
943 /* Check for overloaded BPF extension and
944 * directly convert it if found, otherwise
945 * just move on with mapping.
946 */
947 if (BPF_CLASS(fp->code) == BPF_LD &&
948 BPF_MODE(fp->code) == BPF_ABS &&
949 convert_bpf_extensions(fp, &insn))
950 break;
951
952 insn->code = fp->code;
953 insn->a_reg = BPF_REG_A;
954 insn->x_reg = BPF_REG_X;
955 insn->imm = fp->k;
956 break;
957
958 /* Jump opcodes map as-is, but offsets need adjustment. */
959 case BPF_JMP | BPF_JA:
960 target = i + fp->k + 1;
961 insn->code = fp->code;
962 #define EMIT_JMP \
963 do { \
964 if (target >= len || target < 0) \
965 goto err; \
966 insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
967 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
968 insn->off -= insn - tmp_insns; \
969 } while (0)
970
971 EMIT_JMP;
972 break;
973
974 case BPF_JMP | BPF_JEQ | BPF_K:
975 case BPF_JMP | BPF_JEQ | BPF_X:
976 case BPF_JMP | BPF_JSET | BPF_K:
977 case BPF_JMP | BPF_JSET | BPF_X:
978 case BPF_JMP | BPF_JGT | BPF_K:
979 case BPF_JMP | BPF_JGT | BPF_X:
980 case BPF_JMP | BPF_JGE | BPF_K:
981 case BPF_JMP | BPF_JGE | BPF_X:
982 if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
983 /* BPF immediates are signed, zero extend
984 * immediate into tmp register and use it
985 * in compare insn.
986 */
987 insn->code = BPF_ALU | BPF_MOV | BPF_K;
988 insn->a_reg = BPF_REG_TMP;
989 insn->imm = fp->k;
990 insn++;
991
992 insn->a_reg = BPF_REG_A;
993 insn->x_reg = BPF_REG_TMP;
994 bpf_src = BPF_X;
995 } else {
996 insn->a_reg = BPF_REG_A;
997 insn->x_reg = BPF_REG_X;
998 insn->imm = fp->k;
999 bpf_src = BPF_SRC(fp->code);
1000 }
1001
1002 /* Common case where 'jump_false' is next insn. */
1003 if (fp->jf == 0) {
1004 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
1005 target = i + fp->jt + 1;
1006 EMIT_JMP;
1007 break;
1008 }
1009
1010 /* Convert JEQ into JNE when 'jump_true' is next insn. */
1011 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
1012 insn->code = BPF_JMP | BPF_JNE | bpf_src;
1013 target = i + fp->jf + 1;
1014 EMIT_JMP;
1015 break;
1016 }
1017
1018 /* Other jumps are mapped into two insns: Jxx and JA. */
1019 target = i + fp->jt + 1;
1020 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
1021 EMIT_JMP;
1022 insn++;
1023
1024 insn->code = BPF_JMP | BPF_JA;
1025 target = i + fp->jf + 1;
1026 EMIT_JMP;
1027 break;
1028
1029 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
1030 case BPF_LDX | BPF_MSH | BPF_B:
1031 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1032 insn->a_reg = BPF_REG_TMP;
1033 insn->x_reg = BPF_REG_A;
1034 insn++;
1035
1036 insn->code = BPF_LD | BPF_ABS | BPF_B;
1037 insn->a_reg = BPF_REG_A;
1038 insn->imm = fp->k;
1039 insn++;
1040
1041 insn->code = BPF_ALU | BPF_AND | BPF_K;
1042 insn->a_reg = BPF_REG_A;
1043 insn->imm = 0xf;
1044 insn++;
1045
1046 insn->code = BPF_ALU | BPF_LSH | BPF_K;
1047 insn->a_reg = BPF_REG_A;
1048 insn->imm = 2;
1049 insn++;
1050
1051 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1052 insn->a_reg = BPF_REG_X;
1053 insn->x_reg = BPF_REG_A;
1054 insn++;
1055
1056 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1057 insn->a_reg = BPF_REG_A;
1058 insn->x_reg = BPF_REG_TMP;
1059 break;
1060
1061 /* RET_K, RET_A are remapped into 2 insns. */
1062 case BPF_RET | BPF_A:
1063 case BPF_RET | BPF_K:
1064 insn->code = BPF_ALU | BPF_MOV |
1065 (BPF_RVAL(fp->code) == BPF_K ?
1066 BPF_K : BPF_X);
1067 insn->a_reg = 0;
1068 insn->x_reg = BPF_REG_A;
1069 insn->imm = fp->k;
1070 insn++;
1071
1072 insn->code = BPF_JMP | BPF_EXIT;
1073 break;
1074
1075 /* Store to stack. */
1076 case BPF_ST:
1077 case BPF_STX:
1078 insn->code = BPF_STX | BPF_MEM | BPF_W;
1079 insn->a_reg = BPF_REG_FP;
1080 insn->x_reg = fp->code == BPF_ST ?
1081 BPF_REG_A : BPF_REG_X;
1082 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1083 break;
1084
1085 /* Load from stack. */
1086 case BPF_LD | BPF_MEM:
1087 case BPF_LDX | BPF_MEM:
1088 insn->code = BPF_LDX | BPF_MEM | BPF_W;
1089 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
1090 BPF_REG_A : BPF_REG_X;
1091 insn->x_reg = BPF_REG_FP;
1092 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1093 break;
1094
1095 /* A = K or X = K */
1096 case BPF_LD | BPF_IMM:
1097 case BPF_LDX | BPF_IMM:
1098 insn->code = BPF_ALU | BPF_MOV | BPF_K;
1099 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
1100 BPF_REG_A : BPF_REG_X;
1101 insn->imm = fp->k;
1102 break;
1103
1104 /* X = A */
1105 case BPF_MISC | BPF_TAX:
1106 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1107 insn->a_reg = BPF_REG_X;
1108 insn->x_reg = BPF_REG_A;
1109 break;
1110
1111 /* A = X */
1112 case BPF_MISC | BPF_TXA:
1113 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1114 insn->a_reg = BPF_REG_A;
1115 insn->x_reg = BPF_REG_X;
1116 break;
1117
1118 /* A = skb->len or X = skb->len */
1119 case BPF_LD | BPF_W | BPF_LEN:
1120 case BPF_LDX | BPF_W | BPF_LEN:
1121 insn->code = BPF_LDX | BPF_MEM | BPF_W;
1122 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
1123 BPF_REG_A : BPF_REG_X;
1124 insn->x_reg = BPF_REG_CTX;
1125 insn->off = offsetof(struct sk_buff, len);
1126 break;
1127
1128 /* access seccomp_data fields */
1129 case BPF_LDX | BPF_ABS | BPF_W:
1130 insn->code = BPF_LDX | BPF_MEM | BPF_W;
1131 insn->a_reg = BPF_REG_A;
1132 insn->x_reg = BPF_REG_CTX;
1133 insn->off = fp->k;
1134 break;
1135
1136 default:
1137 goto err;
1138 }
1139
1140 insn++;
1141 if (new_prog)
1142 memcpy(new_insn, tmp_insns,
1143 sizeof(*insn) * (insn - tmp_insns));
1144
1145 new_insn += insn - tmp_insns;
1146 }
1147
1148 if (!new_prog) {
1149 /* Only calculating new length. */
1150 *new_len = new_insn - new_prog;
1151 return 0;
1152 }
1153
1154 pass++;
1155 if (new_flen != new_insn - new_prog) {
1156 new_flen = new_insn - new_prog;
1157 if (pass > 2)
1158 goto err;
1159
1160 goto do_pass;
1161 }
1162
1163 kfree(addrs);
1164 BUG_ON(*new_len != new_flen);
1165 return 0;
1166 err:
1167 kfree(addrs);
1168 return -EINVAL;
1169 }
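/* Usage sketch of the two-pass interface described above (illustrative,
 * error handling elided; mirrors what __sk_migrate_filter() does below):
 *
 *	int new_len;
 *	struct sock_filter_int *insns;
 *
 *	sk_convert_filter(old_prog, old_len, NULL, &new_len);	// 1st: length
 *	insns = kmalloc(new_len * sizeof(*insns), GFP_KERNEL);
 *	sk_convert_filter(old_prog, old_len, insns, &new_len);	// 2nd: emit
 */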
1170
1171 /* Security:
1172 *
1173 * A BPF program is able to use 16 cells of memory to store intermediate
1174 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
1175 *
1176 * As we don't want to clear the mem[] array for each packet going through
1177 * sk_run_filter(), we check that the filter loaded by the user never tries
1178 * to read a cell that was not previously written, and we check all branches
1179 * to be sure a malicious user doesn't try to abuse us.
1180 */
1181 static int check_load_and_stores(struct sock_filter *filter, int flen)
1182 {
1183 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
1184 int pc, ret = 0;
1185
1186 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1187 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
1188 if (!masks)
1189 return -ENOMEM;
1190 memset(masks, 0xff, flen * sizeof(*masks));
1191
1192 for (pc = 0; pc < flen; pc++) {
1193 memvalid &= masks[pc];
1194
1195 switch (filter[pc].code) {
1196 case BPF_S_ST:
1197 case BPF_S_STX:
1198 memvalid |= (1 << filter[pc].k);
1199 break;
1200 case BPF_S_LD_MEM:
1201 case BPF_S_LDX_MEM:
1202 if (!(memvalid & (1 << filter[pc].k))) {
1203 ret = -EINVAL;
1204 goto error;
1205 }
1206 break;
1207 case BPF_S_JMP_JA:
1208 /* a jump must set masks on target */
1209 masks[pc + 1 + filter[pc].k] &= memvalid;
1210 memvalid = ~0;
1211 break;
1212 case BPF_S_JMP_JEQ_K:
1213 case BPF_S_JMP_JEQ_X:
1214 case BPF_S_JMP_JGE_K:
1215 case BPF_S_JMP_JGE_X:
1216 case BPF_S_JMP_JGT_K:
1217 case BPF_S_JMP_JGT_X:
1218 case BPF_S_JMP_JSET_X:
1219 case BPF_S_JMP_JSET_K:
1220 /* a jump must set masks on targets */
1221 masks[pc + 1 + filter[pc].jt] &= memvalid;
1222 masks[pc + 1 + filter[pc].jf] &= memvalid;
1223 memvalid = ~0;
1224 break;
1225 }
1226 }
1227 error:
1228 kfree(masks);
1229 return ret;
1230 }
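/* Example of a filter this check rejects (sketch): M[3] is read before
 * any store to it, so the corresponding bit in memvalid is still clear:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 3),	// load M[3], never written
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 */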
1231
1232 /**
1233 * sk_chk_filter - verify socket filter code
1234 * @filter: filter to verify
1235 * @flen: length of filter
1236 *
1237 * Check the user's filter code. If we let some ugly
1238 * filter code slip through kaboom! The filter must contain
1239 * no references or jumps that are out of range, no illegal
1240 * instructions, and must end with a RET instruction.
1241 *
1242 * All jumps are forward as they are not signed.
1243 *
1244 * Returns 0 if the rule set is legal or -EINVAL if not.
1245 */
1246 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
1247 {
1248 /*
1249 * Valid instructions are initialized to non-0.
1250 * Invalid instructions are initialized to 0.
1251 */
1252 static const u8 codes[] = {
1253 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
1254 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
1255 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
1256 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
1257 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
1258 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
1259 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
1260 [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
1261 [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
1262 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
1263 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
1264 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
1265 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
1266 [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
1267 [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
1268 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
1269 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
1270 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
1271 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
1272 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
1273 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
1274 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
1275 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
1276 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
1277 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
1278 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
1279 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
1280 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
1281 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
1282 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
1283 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
1284 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
1285 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
1286 [BPF_RET|BPF_K] = BPF_S_RET_K,
1287 [BPF_RET|BPF_A] = BPF_S_RET_A,
1288 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
1289 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
1290 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
1291 [BPF_ST] = BPF_S_ST,
1292 [BPF_STX] = BPF_S_STX,
1293 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
1294 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
1295 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
1296 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
1297 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
1298 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
1299 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
1300 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
1301 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
1302 };
1303 int pc;
1304 bool anc_found;
1305
1306 if (flen == 0 || flen > BPF_MAXINSNS)
1307 return -EINVAL;
1308
1309 /* check the filter code now */
1310 for (pc = 0; pc < flen; pc++) {
1311 struct sock_filter *ftest = &filter[pc];
1312 u16 code = ftest->code;
1313
1314 if (code >= ARRAY_SIZE(codes))
1315 return -EINVAL;
1316 code = codes[code];
1317 if (!code)
1318 return -EINVAL;
1319 /* Some instructions need special checks */
1320 switch (code) {
1321 case BPF_S_ALU_DIV_K:
1322 case BPF_S_ALU_MOD_K:
1323 /* check for division by zero */
1324 if (ftest->k == 0)
1325 return -EINVAL;
1326 break;
1327 case BPF_S_LD_MEM:
1328 case BPF_S_LDX_MEM:
1329 case BPF_S_ST:
1330 case BPF_S_STX:
1331 /* check for invalid memory addresses */
1332 if (ftest->k >= BPF_MEMWORDS)
1333 return -EINVAL;
1334 break;
1335 case BPF_S_JMP_JA:
1336 /*
1337 * Note, the large ftest->k might cause loops.
1338 * Compare this with conditional jumps below,
1339 * where offsets are limited. --ANK (981016)
1340 */
1341 if (ftest->k >= (unsigned int)(flen-pc-1))
1342 return -EINVAL;
1343 break;
1344 case BPF_S_JMP_JEQ_K:
1345 case BPF_S_JMP_JEQ_X:
1346 case BPF_S_JMP_JGE_K:
1347 case BPF_S_JMP_JGE_X:
1348 case BPF_S_JMP_JGT_K:
1349 case BPF_S_JMP_JGT_X:
1350 case BPF_S_JMP_JSET_X:
1351 case BPF_S_JMP_JSET_K:
1352 /* for conditionals both must be safe */
1353 if (pc + ftest->jt + 1 >= flen ||
1354 pc + ftest->jf + 1 >= flen)
1355 return -EINVAL;
1356 break;
1357 case BPF_S_LD_W_ABS:
1358 case BPF_S_LD_H_ABS:
1359 case BPF_S_LD_B_ABS:
1360 anc_found = false;
1361 #define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
1362 code = BPF_S_ANC_##CODE; \
1363 anc_found = true; \
1364 break
1365 switch (ftest->k) {
1366 ANCILLARY(PROTOCOL);
1367 ANCILLARY(PKTTYPE);
1368 ANCILLARY(IFINDEX);
1369 ANCILLARY(NLATTR);
1370 ANCILLARY(NLATTR_NEST);
1371 ANCILLARY(MARK);
1372 ANCILLARY(QUEUE);
1373 ANCILLARY(HATYPE);
1374 ANCILLARY(RXHASH);
1375 ANCILLARY(CPU);
1376 ANCILLARY(ALU_XOR_X);
1377 ANCILLARY(VLAN_TAG);
1378 ANCILLARY(VLAN_TAG_PRESENT);
1379 ANCILLARY(PAY_OFFSET);
1380 ANCILLARY(RANDOM);
1381 }
1382
1383 /* ancillary operation unknown or unsupported */
1384 if (anc_found == false && ftest->k >= SKF_AD_OFF)
1385 return -EINVAL;
1386 }
1387 ftest->code = code;
1388 }
1389
1390 /* last instruction must be a RET code */
1391 switch (filter[flen - 1].code) {
1392 case BPF_S_RET_K:
1393 case BPF_S_RET_A:
1394 return check_load_and_stores(filter, flen);
1395 }
1396 return -EINVAL;
1397 }
1398 EXPORT_SYMBOL(sk_chk_filter);
1399
1400 static int sk_store_orig_filter(struct sk_filter *fp,
1401 const struct sock_fprog *fprog)
1402 {
1403 unsigned int fsize = sk_filter_proglen(fprog);
1404 struct sock_fprog_kern *fkprog;
1405
1406 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
1407 if (!fp->orig_prog)
1408 return -ENOMEM;
1409
1410 fkprog = fp->orig_prog;
1411 fkprog->len = fprog->len;
1412 fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
1413 if (!fkprog->filter) {
1414 kfree(fp->orig_prog);
1415 return -ENOMEM;
1416 }
1417
1418 return 0;
1419 }
1420
1421 static void sk_release_orig_filter(struct sk_filter *fp)
1422 {
1423 struct sock_fprog_kern *fprog = fp->orig_prog;
1424
1425 if (fprog) {
1426 kfree(fprog->filter);
1427 kfree(fprog);
1428 }
1429 }
1430
1431 /**
1432 * sk_filter_release_rcu - Release a socket filter by rcu_head
1433 * @rcu: rcu_head that contains the sk_filter to free
1434 */
1435 static void sk_filter_release_rcu(struct rcu_head *rcu)
1436 {
1437 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
1438
1439 sk_release_orig_filter(fp);
1440 bpf_jit_free(fp);
1441 }
1442
1443 /**
1444 * sk_filter_release - release a socket filter
1445 * @fp: filter to remove
1446 *
1447 * Remove a filter from a socket and release its resources.
1448 */
1449 static void sk_filter_release(struct sk_filter *fp)
1450 {
1451 if (atomic_dec_and_test(&fp->refcnt))
1452 call_rcu(&fp->rcu, sk_filter_release_rcu);
1453 }
1454
1455 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
1456 {
1457 atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1458 sk_filter_release(fp);
1459 }
1460
1461 void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1462 {
1463 atomic_inc(&fp->refcnt);
1464 atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1465 }
1466
1467 static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
1468 struct sock *sk,
1469 unsigned int len)
1470 {
1471 struct sk_filter *fp_new;
1472
1473 if (sk == NULL)
1474 return krealloc(fp, len, GFP_KERNEL);
1475
1476 fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
1477 if (fp_new) {
1478 memcpy(fp_new, fp, sizeof(struct sk_filter));
1479 /* As we're keeping orig_prog along in fp_new,
1480 * we need to make sure we're not evicting it
1481 * from the old fp.
1482 */
1483 fp->orig_prog = NULL;
1484 sk_filter_uncharge(sk, fp);
1485 }
1486
1487 return fp_new;
1488 }
1489
1490 static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1491 struct sock *sk)
1492 {
1493 struct sock_filter *old_prog;
1494 struct sk_filter *old_fp;
1495 int i, err, new_len, old_len = fp->len;
1496
1497 /* We are free to overwrite insns et al. right here as they
1498 * won't be used at this point in time anymore internally
1499 * after the migration to the internal BPF instruction
1500 * representation.
1501 */
1502 BUILD_BUG_ON(sizeof(struct sock_filter) !=
1503 sizeof(struct sock_filter_int));
1504
1505 /* For now, we need to unfiddle BPF_S_* identifiers in place.
1506 * This can sooner or later be subject to removal, e.g. when
1507 * JITs have been converted.
1508 */
1509 for (i = 0; i < fp->len; i++)
1510 sk_decode_filter(&fp->insns[i], &fp->insns[i]);
1511
1512 /* Conversion cannot happen on overlapping memory areas,
1513 * so we need to keep the user BPF around until the 2nd
1514 * pass. At this time, the user BPF is stored in fp->insns.
1515 */
1516 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
1517 GFP_KERNEL);
1518 if (!old_prog) {
1519 err = -ENOMEM;
1520 goto out_err;
1521 }
1522
1523 /* 1st pass: calculate the new program length. */
1524 err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
1525 if (err)
1526 goto out_err_free;
1527
1528 /* Expand fp for appending the new filter representation. */
1529 old_fp = fp;
1530 fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
1531 if (!fp) {
1532 /* The old_fp is still around in case we couldn't
1533 * allocate new memory, so uncharge on that one.
1534 */
1535 fp = old_fp;
1536 err = -ENOMEM;
1537 goto out_err_free;
1538 }
1539
1540 fp->bpf_func = sk_run_filter_int_skb;
1541 fp->len = new_len;
1542
1543 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
1544 err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
1545 if (err)
1546 /* 2nd sk_convert_filter() can fail only if it fails
1547 * to allocate memory; remapping must succeed. Note
1548 * that at this time old_fp has already been released
1549 * by __sk_migrate_realloc().
1550 */
1551 goto out_err_free;
1552
1553 kfree(old_prog);
1554 return fp;
1555
1556 out_err_free:
1557 kfree(old_prog);
1558 out_err:
1559 /* Rollback filter setup. */
1560 if (sk != NULL)
1561 sk_filter_uncharge(sk, fp);
1562 else
1563 kfree(fp);
1564 return ERR_PTR(err);
1565 }
1566
1567 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1568 struct sock *sk)
1569 {
1570 int err;
1571
1572 fp->bpf_func = NULL;
1573 fp->jited = 0;
1574
1575 err = sk_chk_filter(fp->insns, fp->len);
1576 if (err)
1577 return ERR_PTR(err);
1578
1579 /* Probe if we can JIT compile the filter and if so, do
1580 * the compilation of the filter.
1581 */
1582 bpf_jit_compile(fp);
1583
1584 /* JIT compiler couldn't process this filter, so do the
1585 * internal BPF translation for the optimized interpreter.
1586 */
1587 if (!fp->jited)
1588 fp = __sk_migrate_filter(fp, sk);
1589
1590 return fp;
1591 }
1592
1593 /**
1594 * sk_unattached_filter_create - create an unattached filter
1595 * @fprog: the filter program
1596 * @pfp: the unattached filter that is created
1597 *
1598 * Create a filter independent of any socket. We first run some
1599 * sanity checks on it to make sure it does not explode on us later.
1600 * If an error occurs or there is insufficient memory for the filter
1601 * a negative errno code is returned. On success the return is zero.
1602 */
1603 int sk_unattached_filter_create(struct sk_filter **pfp,
1604 struct sock_fprog *fprog)
1605 {
1606 unsigned int fsize = sk_filter_proglen(fprog);
1607 struct sk_filter *fp;
1608
1609 /* Make sure new filter is there and in the right amounts. */
1610 if (fprog->filter == NULL)
1611 return -EINVAL;
1612
1613 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
1614 if (!fp)
1615 return -ENOMEM;
1616
1617 memcpy(fp->insns, fprog->filter, fsize);
1618
1619 atomic_set(&fp->refcnt, 1);
1620 fp->len = fprog->len;
1621 /* Since unattached filters are not copied back to user
1622 * space through sk_get_filter(), we do not need to hold
1623 * a copy here, and can spare us the work.
1624 */
1625 fp->orig_prog = NULL;
1626
1627 /* __sk_prepare_filter() already takes care of uncharging
1628 * memory in case something goes wrong.
1629 */
1630 fp = __sk_prepare_filter(fp, NULL);
1631 if (IS_ERR(fp))
1632 return PTR_ERR(fp);
1633
1634 *pfp = fp;
1635 return 0;
1636 }
1637 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
1638
1639 void sk_unattached_filter_destroy(struct sk_filter *fp)
1640 {
1641 sk_filter_release(fp);
1642 }
1643 EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
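/* Usage sketch for the unattached filter API (illustrative, error handling
 * elided), as used by in-kernel consumers:
 *
 *	struct sk_filter *fp;
 *
 *	sk_unattached_filter_create(&fp, &fprog);
 *	...
 *	pkt_len = SK_RUN_FILTER(fp, skb);
 *	...
 *	sk_unattached_filter_destroy(fp);
 */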
1644
1645 /**
1646 * sk_attach_filter - attach a socket filter
1647 * @fprog: the filter program
1648 * @sk: the socket to use
1649 *
1650 * Attach the user's filter code. We first run some sanity checks on
1651 * it to make sure it does not explode on us later. If an error
1652 * occurs or there is insufficient memory for the filter a negative
1653 * errno code is returned. On success the return is zero.
1654 */
1655 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1656 {
1657 struct sk_filter *fp, *old_fp;
1658 unsigned int fsize = sk_filter_proglen(fprog);
1659 unsigned int sk_fsize = sk_filter_size(fprog->len);
1660 int err;
1661
1662 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1663 return -EPERM;
1664
1665 /* Make sure new filter is there and in the right amounts. */
1666 if (fprog->filter == NULL)
1667 return -EINVAL;
1668
1669 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
1670 if (!fp)
1671 return -ENOMEM;
1672
1673 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1674 sock_kfree_s(sk, fp, sk_fsize);
1675 return -EFAULT;
1676 }
1677
1678 atomic_set(&fp->refcnt, 1);
1679 fp->len = fprog->len;
1680
1681 err = sk_store_orig_filter(fp, fprog);
1682 if (err) {
1683 sk_filter_uncharge(sk, fp);
1684 return -ENOMEM;
1685 }
1686
1687 /* __sk_prepare_filter() already takes care of uncharging
1688 * memory in case something goes wrong.
1689 */
1690 fp = __sk_prepare_filter(fp, sk);
1691 if (IS_ERR(fp))
1692 return PTR_ERR(fp);
1693
1694 old_fp = rcu_dereference_protected(sk->sk_filter,
1695 sock_owned_by_user(sk));
1696 rcu_assign_pointer(sk->sk_filter, fp);
1697
1698 if (old_fp)
1699 sk_filter_uncharge(sk, old_fp);
1700
1701 return 0;
1702 }
1703 EXPORT_SYMBOL_GPL(sk_attach_filter);
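/* Userspace reaches sk_attach_filter() via setsockopt() (sketch, error
 * handling elided):
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept the packet
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */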
1704
1705 int sk_detach_filter(struct sock *sk)
1706 {
1707 int ret = -ENOENT;
1708 struct sk_filter *filter;
1709
1710 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1711 return -EPERM;
1712
1713 filter = rcu_dereference_protected(sk->sk_filter,
1714 sock_owned_by_user(sk));
1715 if (filter) {
1716 RCU_INIT_POINTER(sk->sk_filter, NULL);
1717 sk_filter_uncharge(sk, filter);
1718 ret = 0;
1719 }
1720
1721 return ret;
1722 }
1723 EXPORT_SYMBOL_GPL(sk_detach_filter);
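/* The userspace counterpart (sketch): an attached filter is removed with
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 *
 * which lands here and returns -ENOENT if no filter was attached.
 */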
1724
1725 void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
1726 {
1727 static const u16 decodes[] = {
1728 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
1729 [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
1730 [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
1731 [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
1732 [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
1733 [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
1734 [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
1735 [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
1736 [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
1737 [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
1738 [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
1739 [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
1740 [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
1741 [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
1742 [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
1743 [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
1744 [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
1745 [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
1746 [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
1747 [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
1748 [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
1749 [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
1750 [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
1751 [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
1752 [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
1753 [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
1754 [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
1755 [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
1756 [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
1757 [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
1758 [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
1759 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
1760 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
1761 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
1762 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
1763 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
1764 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
1765 [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS,
1766 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
1767 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
1768 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
1769 [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
1770 [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
1771 [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
1772 [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
1773 [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
1774 [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
1775 [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
1776 [BPF_S_RET_K] = BPF_RET|BPF_K,
1777 [BPF_S_RET_A] = BPF_RET|BPF_A,
1778 [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
1779 [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
1780 [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
1781 [BPF_S_ST] = BPF_ST,
1782 [BPF_S_STX] = BPF_STX,
1783 [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
1784 [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
1785 [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
1786 [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
1787 [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
1788 [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
1789 [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
1790 [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
1791 [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
1792 };
1793 u16 code;
1794
1795 code = filt->code;
1796
1797 to->code = decodes[code];
1798 to->jt = filt->jt;
1799 to->jf = filt->jf;
1800 to->k = filt->k;
1801 }
1802
1803 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1804 unsigned int len)
1805 {
1806 struct sock_fprog_kern *fprog;
1807 struct sk_filter *filter;
1808 int ret = 0;
1809
1810 lock_sock(sk);
1811 filter = rcu_dereference_protected(sk->sk_filter,
1812 sock_owned_by_user(sk));
1813 if (!filter)
1814 goto out;
1815
1816 /* We're copying the filter that was originally attached,
1817 * so no conversion/decode needed anymore.
1818 */
1819 fprog = filter->orig_prog;
1820
1821 ret = fprog->len;
1822 if (!len)
1823 /* User space only enquires about the number of filter blocks. */
1824 goto out;
1825
1826 ret = -EINVAL;
1827 if (len < fprog->len)
1828 goto out;
1829
1830 ret = -EFAULT;
1831 if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
1832 goto out;
1833
1834 /* The API requires us to return the number of filter
1835 * blocks rather than the number of bytes.
1836 */
1837 ret = fprog->len;
1838 out:
1839 release_sock(sk);
1840 return ret;
1841 }
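/* Userspace reaches this via getsockopt(SOL_SOCKET, SO_GET_FILTER, ...)
 * (a sketch of the in-kernel contract): a first call with len == 0 only
 * returns the number of filter blocks, and a second call with len at
 * least that large copies the originally attached program into ubuf.
 */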