net/ipv4/inet_fragment.c
/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>

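/*
 * Timer callback: re-seed the hash secret (f->rnd) and move every queue
 * to the bucket matching its new hash value, so an attacker cannot keep
 * colliding fragments piled up in a single chain indefinitely.
 */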
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

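/*
 * One-time initialisation of a protocol's inet_frags descriptor: empty
 * hash buckets, the bucket lock, an initial hash seed, and the periodic
 * secret-rebuild timer armed at secret_interval.
 */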
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

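/*
 * Per-namespace state: queue count, memory accounting, and the LRU list
 * that the evictor walks.
 */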
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

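/* Tear down the global side: stop the secret-rebuild timer. */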
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

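/*
 * Namespace teardown: force low_thresh to 0 so the evictor drains every
 * queue, then release the per-namespace memory counter.
 */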
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

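/* Remove a queue from its hash chain and the per-namespace LRU list. */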
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

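/*
 * Mark a queue complete and drop the references held by its expiry
 * timer and by the hash table, so it is destroyed once the last user
 * puts its reference.
 */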
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

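/* Free one fragment, giving the protocol's skb_free hook a chance to
 * clean up first.
 */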
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

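/*
 * Final teardown of a dead (INET_FRAG_COMPLETE) queue: free all queued
 * fragments, return the accounted memory (truesize of the skbs plus the
 * queue structure itself), credit @work if the evictor passed one, and
 * free the queue.
 */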
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
			int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

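/*
 * Reclaim memory by killing queues from the head of the LRU list until
 * usage falls to low_thresh. Unless @force is set, nothing happens
 * while usage is still below high_thresh. Returns the number of queues
 * evicted.
 */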
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

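/*
 * Insert a freshly allocated queue into the hash table, taking
 * references for the expiry timer and for the hash chain. If another
 * CPU raced us and interned an equivalent queue first, drop ours and
 * return the existing one instead.
 */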
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we were without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash table, because an equivalent
	 * entry could have been created on another CPU before we took
	 * the write lock.
	 */
	hlist_for_each_entry(qp, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	nf->nqueues++;
	write_unlock(&f->lock);
	inet_frag_lru_add(nf, qp);
	return qp;
}

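/*
 * Allocate and initialise a queue of the protocol-specific size
 * (f->qsize), charge it against the namespace memory limit, and set up
 * (but do not start) its expiry timer; the queue starts with a single
 * reference owned by the caller.
 */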
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

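/* Slow path of inet_frag_find(): allocate a new queue and intern it. */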
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

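/*
 * Find the queue matching @key in bucket @hash and take a reference to
 * it, or fall back to inet_frag_create() when this is the first
 * fragment of a new datagram. The caller holds f->lock for reading;
 * it is released on all paths before returning. A chain longer than
 * INETFRAGS_MAXDEPTH is treated as hash abuse: the lookup gives up and
 * returns ERR_PTR(-ENOBUFS) so the caller drops the fragment.
 *
 * Roughly how the ipv4 side in ip_fragment.c drives this (a sketch,
 * not verbatim):
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 */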
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	int depth = 0;

	hlist_for_each_entry(q, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

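/*
 * Rate-limited warning for the ERR_PTR(-ENOBUFS) case above, i.e. a
 * hash chain that exceeded INETFRAGS_MAXDEPTH.
 */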
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);