/* net/netfilter/nf_sockopt.c */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h>

#include "nf_internals.h"

/* Sockopts only registered and called from user context, so
   net locking would be overkill. Also, [gs]etsockopt calls may
   sleep. */
static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts);

/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
	return max1 > min2 && min1 < max2;
}
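/*
 * Worked example (added for illustration, not part of the original file):
 * for the exclusive ranges [64, 69) and [66, 70),
 * overlap(64, 69, 66, 70) evaluates 69 > 66 && 64 < 70, which is true,
 * so nf_register_sockopt() below would refuse the second range with -EBUSY.
 * Adjacent but disjoint ranges such as [64, 69) and [69, 73) give
 * 69 > 69 == false, i.e. no overlap.
 */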

/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
	struct list_head *i;
	int ret = 0;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == reg->pf
		    && (overlap(ops->set_optmin, ops->set_optmax,
				reg->set_optmin, reg->set_optmax)
			|| overlap(ops->get_optmin, ops->get_optmax,
				   reg->get_optmin, reg->get_optmax))) {
			NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
				ops->set_optmin, ops->set_optmax,
				ops->get_optmin, ops->get_optmax,
				reg->set_optmin, reg->set_optmax,
				reg->get_optmin, reg->get_optmax);
			ret = -EBUSY;
			goto out;
		}
	}

	list_add(&reg->list, &nf_sockopts);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);
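/*
 * Illustration (added; not part of the original file): a hypothetical
 * module claiming the exclusive PF_INET ranges [96, 100) for both set
 * and get.  The handler prototypes below are written to match how
 * nf_sockopt() invokes ops->set() and ops->get(); the authoritative
 * prototypes are those of struct nf_sockopt_ops in <linux/netfilter.h>
 * and may differ slightly (e.g. void __user * and unsigned int).
 */
static int example_set(struct sock *sk, int optval, char __user *user, int len)
{
	return 0;	/* would copy_from_user() and act on the option */
}

static int example_get(struct sock *sk, int optval, char __user *user, int *len)
{
	return 0;	/* would copy_to_user() and update *len */
}

static struct nf_sockopt_ops example_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= 96,	/* hypothetical numbers; optmax is exclusive */
	.set_optmax	= 100,
	.set		= example_set,
	.get_optmin	= 96,
	.get_optmax	= 100,
	.get		= example_get,
};

static int __init example_init(void)
{
	/* Fails with -EBUSY if another PF_INET range overlaps. */
	return nf_register_sockopt(&example_sockopts);
}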

void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
	/* No point being interruptible: we're probably in cleanup_module() */
 restart:
	mutex_lock(&nf_sockopt_mutex);
	if (reg->use != 0) {
		/* To be woken by nf_sockopt call... */
		/* FIXME: Stuart Young's name appears gratuitously. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		reg->cleanup_task = current;
		mutex_unlock(&nf_sockopt_mutex);
		schedule();
		goto restart;
	}
	list_del(&reg->list);
	mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);
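/*
 * Illustration (added; not part of the original file): teardown matching
 * the hypothetical example_sockopts above.  nf_unregister_sockopt() sleeps
 * until reg->use drops back to zero, so it is safe to call from module
 * exit even while another task is still inside example_get()/example_set().
 */
static void __exit example_exit(void)
{
	nf_unregister_sockopt(&example_sockopts);
}

module_init(example_init);
module_exit(example_exit);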

/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, int pf, int val,
		      char __user *opt, int *len, int get)
{
	struct list_head *i;
	struct nf_sockopt_ops *ops;
	int ret;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == pf) {
			if (get) {
				if (val >= ops->get_optmin
				    && val < ops->get_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					ret = ops->get(sk, val, opt, len);
					goto out;
				}
			} else {
				if (val >= ops->set_optmin
				    && val < ops->set_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					ret = ops->set(sk, val, opt, *len);
					goto out;
				}
			}
		}
	}
	mutex_unlock(&nf_sockopt_mutex);
	return -ENOPROTOOPT;

 out:
	mutex_lock(&nf_sockopt_mutex);
	ops->use--;
	/* Wake a concurrent nf_unregister_sockopt() waiting for use == 0. */
	if (ops->cleanup_task)
		wake_up_process(ops->cleanup_task);
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}

int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
		  int len)
{
	return nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(nf_setsockopt);

int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
{
	return nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(nf_getsockopt);
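/*
 * Illustration (added; not part of the original file): roughly how a
 * protocol's getsockopt() path falls back to a registered netfilter range
 * for options it does not handle itself.  The IPv4 stack does something
 * similar from ip_getsockopt(); the function name, the PF_INET choice and
 * the use of get_user()/put_user() (which need <asm/uaccess.h>) are
 * assumptions of this sketch.
 */
static int example_af_getsockopt(struct sock *sk, int optname,
				 char __user *optval, int __user *optlen)
{
	int len, err;

	if (get_user(len, optlen))
		return -EFAULT;

	/* Dispatch to whichever ops has optname inside [get_optmin, get_optmax). */
	err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
	if (err >= 0)
		err = put_user(len, optlen);
	return err;
}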

#ifdef CONFIG_COMPAT
static int compat_nf_sockopt(struct sock *sk, int pf, int val,
			     char __user *opt, int *len, int get)
{
	struct list_head *i;
	struct nf_sockopt_ops *ops;
	int ret;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == pf) {
			if (get) {
				if (val >= ops->get_optmin
				    && val < ops->get_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					if (ops->compat_get)
						ret = ops->compat_get(sk,
							val, opt, len);
					else
						ret = ops->get(sk,
							val, opt, len);
					goto out;
				}
			} else {
				if (val >= ops->set_optmin
				    && val < ops->set_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					if (ops->compat_set)
						ret = ops->compat_set(sk,
							val, opt, *len);
					else
						ret = ops->set(sk,
							val, opt, *len);
					goto out;
				}
			}
		}
	}
	mutex_unlock(&nf_sockopt_mutex);
	return -ENOPROTOOPT;

 out:
	mutex_lock(&nf_sockopt_mutex);
	ops->use--;
	if (ops->cleanup_task)
		wake_up_process(ops->cleanup_task);
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}

int compat_nf_setsockopt(struct sock *sk, int pf,
		int val, char __user *opt, int len)
{
	return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(compat_nf_setsockopt);

int compat_nf_getsockopt(struct sock *sk, int pf,
		int val, char __user *opt, int *len)
{
	return compat_nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(compat_nf_getsockopt);
#endif
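/*
 * Illustration (added; not part of the original file): a range can supply
 * dedicated 32-bit compat handlers; compat_nf_[gs]etsockopt() above fall
 * back to the regular ->get()/->set() when ->compat_get/->compat_set are
 * NULL.  The prototype follows the compat_get call site above and may
 * differ slightly from struct nf_sockopt_ops in <linux/netfilter.h>.
 */
#ifdef CONFIG_COMPAT
static int example_compat_get(struct sock *sk, int optval,
			      char __user *user, int *len)
{
	/* Translate the 32-bit userland layout here, then reuse example_get(). */
	return example_get(sk, optval, user, len);
}
/* ...and set .compat_get = example_compat_get in example_sockopts. */
#endif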