net/ipv4/inet_fragment.c
/*
 *              inet fragments management
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Authors:        Pavel Emelyanov <xemul@openvz.org>
 *                              Started as consolidation of ipv4/ip_fragment.c,
 *                              ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR of the ECN values of all fragments, apply the RFC 3168 5.3
 * requirements.
 * Value: 0xff if the frame should be dropped.
 *        0 or INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
        return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

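/* Timer callback: re-key the hash with a fresh random seed and relink every
 * queue into the bucket it now hashes to, then re-arm the timer for the next
 * secret_interval.
 */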
static void inet_frag_secret_rebuild(unsigned long dummy)
{
        struct inet_frags *f = (struct inet_frags *)dummy;
        unsigned long now = jiffies;
        int i;

        /* Per bucket lock NOT needed here, due to write lock protection */
        write_lock(&f->lock);

        get_random_bytes(&f->rnd, sizeof(u32));
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = inet_frag_hashfn(f, q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];
                                hlist_add_head(&q->list, &hb_dest->chain);
                        }
                }
        }
        write_unlock(&f->lock);

        mod_timer(&f->secret_timer, now + f->secret_interval);
}

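/* One-time setup of a protocol's inet_frags: initialise every hash bucket and
 * its chain lock, the rwlock that protects hash rebuilds, and arm the secret
 * (re-keying) timer.
 */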
void inet_frags_init(struct inet_frags *f)
{
        int i;

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }
        rwlock_init(&f->lock);

        setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
                        (unsigned long)f);
        f->secret_timer.expires = jiffies + f->secret_interval;
        add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

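/* Per-namespace setup: zero the queue count and initialise the memory
 * accounting and the LRU list used by the evictor.
 */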
void inet_frags_init_net(struct netns_frags *nf)
{
        nf->nqueues = 0;
        init_frag_mem_limit(nf);
        INIT_LIST_HEAD(&nf->lru_list);
        spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
        del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

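/* Per-namespace teardown: drop the eviction target to zero, force-evict every
 * remaining queue, then release the memory-limit counter.
 */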
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        nf->low_thresh = 0;

        local_bh_disable();
        inet_frag_evictor(nf, f, true);
        local_bh_enable();

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;
        unsigned int hash;

        read_lock(&f->lock);
        hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);

        read_unlock(&f->lock);
        inet_frag_lru_del(fq);
}

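/* Take a queue out of service: stop its timer, unlink it from the hash and
 * the LRU list, and mark it complete.  Each reference given up here drops the
 * refcount; the last holder ends up destroying the queue.
 */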
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
                fq->last_in |= INET_FRAG_COMPLETE;
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                struct sk_buff *skb)
{
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

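/* Final teardown of a completed queue: free every queued fragment, subtract
 * the freed truesize plus the queue itself from the memory accounting (and
 * from *work, if the evictor passed one), run the protocol destructor and
 * free the queue.
 */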
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
                                        int *work)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
        if (work)
                *work -= sum;
        sub_frag_mem_limit(q, sum);

        if (f->destructor)
                f->destructor(q);
        kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

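/* Reclaim memory by dropping queues in LRU order.  Without @force, nothing
 * happens until usage exceeds high_thresh, and eviction stops once usage has
 * fallen back to low_thresh; with @force the LRU list is drained completely.
 * Returns the number of queues evicted.
 */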
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
        struct inet_frag_queue *q;
        int work, evicted = 0;

        if (!force) {
                if (frag_mem_limit(nf) <= nf->high_thresh)
                        return 0;
        }

        work = frag_mem_limit(nf) - nf->low_thresh;
        while (work > 0 || force) {
                spin_lock(&nf->lru_lock);

                if (list_empty(&nf->lru_list)) {
                        spin_unlock(&nf->lru_lock);
                        break;
                }

                q = list_first_entry(&nf->lru_list,
                                struct inet_frag_queue, lru_list);
                atomic_inc(&q->refcnt);
                /* Remove q from list to avoid several CPUs grabbing it */
                list_del_init(&q->lru_list);

                spin_unlock(&nf->lru_lock);

                spin_lock(&q->lock);
                if (!(q->last_in & INET_FRAG_COMPLETE))
                        inet_frag_kill(q, f);
                spin_unlock(&q->lock);

                if (atomic_dec_and_test(&q->refcnt))
                        inet_frag_destroy(q, f, &work);
                evicted++;
        }

        return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

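/* Insert a freshly allocated queue into the hash under the bucket lock.  On
 * SMP another CPU may have created a matching queue while we were unlocked;
 * in that case the existing queue is returned and the new one is dropped.
 */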
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                struct inet_frag_queue *qp_in, struct inet_frags *f,
                void *arg)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *qp;
        unsigned int hash;

        read_lock(&f->lock); /* Protects against hash rebuild */
        /*
         * While we were without the lock another CPU could have updated
         * the rnd seed, so we need to re-calculate the hash chain.
         * Fortunately qp_in can be used to get one.
         */
        hash = inet_frag_hashfn(f, qp_in);
        hb = &f->hash[hash];
        spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
        /* With an SMP race we have to recheck the hash table, because
         * such an entry could have been created on another cpu while we
         * released the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        read_unlock(&f->lock);
                        qp_in->last_in |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);
        inet_frag_lru_add(nf, qp);
        spin_unlock(&hb->chain_lock);
        read_unlock(&f->lock);

        return qp;
}

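/* Allocate and initialise a queue: run the protocol constructor, charge
 * f->qsize to the memory limit and set up the expiry timer.  Returns NULL
 * if the atomic allocation fails.
 */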
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                struct inet_frags *f, void *arg)
{
        struct inet_frag_queue *q;

        q = kzalloc(f->qsize, GFP_ATOMIC);
        if (q == NULL)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(q, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);
        INIT_LIST_HEAD(&q->lru_list);

        return q;
}

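/* Allocate a new queue for @arg and hash it in, or return the matching queue
 * another CPU interned first.
 */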
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                struct inet_frags *f, void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (q == NULL)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

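/* Look up the queue matching @key in the given hash bucket and take a
 * reference on it.  On a miss a new queue is created, unless the chain has
 * already grown past INETFRAGS_MAXDEPTH, in which case ERR_PTR(-ENOBUFS) is
 * returned.  Called with f->lock read-locked; the lock is released here.
 */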
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        read_unlock(&f->lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);
        read_unlock(&f->lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);
        else
                return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

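/* Emit a rate-limited warning when inet_frag_find() refused to create a queue
 * because the bucket chain grew past INETFRAGS_MAXDEPTH.
 */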
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
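
/* Usage sketch (illustrative only, not part of this file): a reassembly
 * protocol fills in struct inet_frags with its own callbacks and sizes,
 * registers it once at module init, and sets per-namespace limits, roughly
 * the way ipv4/ip_fragment.c does.  All names prefixed with my_ and the
 * net->my.frags placement are hypothetical; the field names and the
 * inet_frags_init()/inet_frags_init_net() calls come from this file.
 */
#if 0   /* example only, never compiled */
static struct inet_frags my_frags;

static int __init my_frag_init(void)
{
        my_frags.hashfn          = my_hashfn;           /* hashes a queue, mixing in f->rnd */
        my_frags.constructor     = my_frag_init_queue;  /* fills the protocol part of the queue */
        my_frags.destructor      = NULL;
        my_frags.skb_free        = NULL;
        my_frags.qsize           = sizeof(struct my_frag_queue);
        my_frags.match           = my_frag_match;       /* compares a queue against a lookup key */
        my_frags.frag_expire     = my_frag_expire;      /* per-queue timer callback */
        my_frags.secret_interval = 10 * 60 * HZ;        /* re-key the hash every 10 minutes */
        inet_frags_init(&my_frags);
        return 0;
}

static int __net_init my_frags_init_net(struct net *net)
{
        /* thresholds mirror the ipv4 defaults of this era */
        net->my.frags.high_thresh = 4 * 1024 * 1024;
        net->my.frags.low_thresh  = 3 * 1024 * 1024;
        net->my.frags.timeout     = 30 * HZ;
        inet_frags_init_net(&net->my.frags);
        return 0;
}
#endif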