net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should be
 * accepted, -EPERM if the filter says to toss it, or another negative
 * errno (e.g. -ENOMEM for a pfmemalloc skb on a non-SOCK_MEMALLOC socket).
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
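
/*
 * Illustration only (not part of the original file): the typical
 * call-site pattern for sk_filter(), as seen in receive paths such as
 * sock_queue_rcv_skb(). Any non-zero return means the caller should
 * drop the skb; the function name below is hypothetical.
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sk_filter(sk, skb);
 *
 *		if (err)
 *			return err;	// e.g. -EPERM: the filter dropped it
 *		// ... queue the (possibly trimmed) skb ...
 *		return 0;
 *	}
 */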

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need
 * to check flen. (We used to pass the filter length to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtraction below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtraction below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must not extend past the packet */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
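
/*
 * Illustration only (not part of the original file): a minimal classic
 * BPF program in the uapi encoding that sk_run_filter() interprets once
 * it has passed sk_chk_filter(). It loads the ancillary "protocol" word
 * and accepts up to 64KB of an IPv4 frame, dropping everything else.
 * BPF_STMT/BPF_JUMP and the SKF_AD_* offsets come from <linux/filter.h>;
 * ETH_P_IP comes from <linux/if_ether.h>.
 *
 *	struct sock_filter ipv4_only[] = {
 *		// A = skb->protocol (ancillary load)
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_PROTOCOL),
 *		// if (A == ETH_P_IP) accept, else drop
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// keep up to 64KB
 *		BPF_STMT(BPF_RET | BPF_K, 0),		// toss the packet
 *	};
 */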

/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
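
/*
 * Illustration only (not part of the original file): the class of
 * program check_load_and_stores() exists to reject -- a load from a
 * scratch cell that no path has written:
 *
 *	{ BPF_LD | BPF_MEM, 0, 0, 1 },	// A = mem[1], never stored
 *	{ BPF_RET | BPF_A,  0, 0, 0 },
 *
 * At pc 0, bit 1 of memvalid is still clear, so the BPF_S_LD_MEM case
 * above returns -EINVAL and the filter is never accepted.
 */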

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
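
/*
 * Illustration only (not part of the original file): what the
 * ANCILLARY() remap in sk_chk_filter() accomplishes. A userspace
 * instruction such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * arrives as an ordinary absolute load whose offset falls in the
 * reserved SKF_AD_OFF range; the check rewrites its ->code to
 * BPF_S_ANC_IFINDEX, so sk_run_filter() dispatches on the opcode alone
 * instead of re-testing k for every packet.
 */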

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

static int __sk_prepare_filter(struct sk_filter *fp)
{
	int err;

	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
		return err;

	bpf_jit_compile(fp);
	return 0;
}

/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@fprog: the filter program
 *	@pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure the new filter is actually there. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err)
		goto free_mem;

	*pfp = fp;
	return 0;
free_mem:
	kfree(fp);
	return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
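
/*
 * Illustration only (not part of the original file): a hedged sketch of
 * the in-kernel usage pattern for the unattached-filter API. Note that,
 * unlike sk_attach_filter(), the instructions are memcpy()ed here, so
 * fprog->filter must point at kernel memory, not a user pointer.
 *
 *	struct sock_fprog prog = { .len = len, .filter = kernel_insns };
 *	struct sk_filter *f;
 *	int err = sk_unattached_filter_create(&f, &prog);
 *
 *	if (!err) {
 *		unsigned int res = SK_RUN_FILTER(f, skb);	// 0 = drop
 *		// ... act on res ...
 *		sk_unattached_filter_destroy(f);
 *	}
 */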

void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure the new filter is actually there. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
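
/*
 * Illustration only (not part of the original file): how userspace
 * normally reaches sk_attach_filter() -- sock_setsockopt() routes the
 * SO_ATTACH_FILTER option here with the socket lock held. A minimal
 * userspace sketch (error handling omitted):
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },	// accept everything
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */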

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);