/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

static kmem_cache_t *skbuff_head_cache;

struct timeval __read_mostly skb_tv_base;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}
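
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates an skb with alloc_skb(), reserves headroom for protocol
 * headers with skb_reserve(), and then appends the payload with skb_put().
 * The headroom size and function name below are hypothetical.
 */
#if 0
static struct sk_buff *example_build_packet(const void *payload, int len)
{
	/* 32 bytes of headroom is an arbitrary example value. */
	struct sk_buff *skb = alloc_skb(32 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 32);				/* room for headers */
	memcpy(skb_put(skb, len), payload, len);	/* append payload */
	return skb;
}
#endif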

/**
 *	alloc_skb_from_cache - allocate a network buffer
 *	@cp: kmem_cache from which to allocate the data area
 *	     (object size must be big enough for @size bytes + skb overheads)
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
				     unsigned int size,
				     unsigned int __nocast gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. */
	size = SKB_DATA_ALIGN(size);
	data = kmem_cache_alloc(cp, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}


static void skb_drop_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	skb_shinfo(skb)->frag_list = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	skb_release_data(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif

	kfree_skbmem(skb);
}

/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
{
	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);

	if (!n)
		return NULL;

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	C(tstamp);
	C(dev);
	C(h);
	C(nh);
	C(mac);
	C(dst);
	dst_clone(skb->dst);
	C(sp);
#ifdef CONFIG_INET
	secpath_get(skb->sp);
#endif
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(csum);
	C(local_df);
	n->cloned = 1;
	n->nohdr = 0;
	C(pkt_type);
	C(ip_summed);
	C(priority);
	C(protocol);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark);
	C(nfct);
	nf_conntrack_get(skb->nfct);
	C(nfctinfo);
#ifdef CONFIG_BRIDGE_NETFILTER
	C(nf_bridge);
	nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	C(input_dev);
#endif

#endif
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;

	new->sk = NULL;
	new->dev = old->dev;
	new->priority = old->priority;
	new->protocol = old->protocol;
	new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp = secpath_get(old->sp);
#endif
	new->h.raw = old->h.raw + offset;
	new->nh.raw = old->nh.raw + offset;
	new->mac.raw = old->mac.raw + offset;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->tstamp = old->tstamp;
	new->destructor = NULL;
#ifdef CONFIG_NETFILTER
	new->nfmark = old->nfmark;
	new->nfct = old->nfct;
	nf_conntrack_get(old->nfct);
	new->nfctinfo = old->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
	new->nf_bridge = old->nf_bridge;
	nf_bridge_get(old->nf_bridge);
#endif
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
	new->tc_index = old->tc_index;
#endif
	atomic_set(&new->users, 1);
	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
}

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
				      gfp_mask);
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 *	pskb_copy - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	memcpy(n->data, skb->data, n->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
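
/*
 * Illustrative sketch (not part of the original file): choosing between
 * skb_clone(), pskb_copy() and skb_copy(). A clone shares all packet
 * data; pskb_copy() privatizes only the linear header; skb_copy()
 * privatizes (and linearizes) everything. The helper name is hypothetical.
 */
#if 0
static struct sk_buff *example_writable_skb(struct sk_buff *skb,
					    int need_writable_payload)
{
	if (need_writable_payload)
		return skb_copy(skb, GFP_ATOMIC);	/* full private copy */
	return pskb_copy(skb, GFP_ATOMIC);		/* private header only */
}
#endif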

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are
 *	zero) the header of the skb. The &sk_buff itself is not changed
 *	and MUST have a reference count of 1. Returns zero on success,
 *	or an error if expansion failed; in the latter case the &sk_buff
 *	is not changed.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     unsigned int __nocast gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->end = data + size;
	skb->data += off;
	skb->tail += off;
	skb->mac.raw += off;
	skb->h.raw += off;
	skb->nh.raw += off;
	skb->cloned = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}


/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 *
 *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				unsigned int __nocast gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = skb_headroom(skb);
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	return n;
}

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return NULL in out of memory cases.
 */

struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *nskb;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return skb;
	}

	nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad,
			       GFP_ATOMIC);
	kfree_skb(skb);
	if (nskb)
		memset(nskb->data + nskb->len, 0, pad);
	return nskb;
}
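
/*
 * Illustrative sketch (not part of the original file): a driver padding a
 * short frame to the 60-byte minimum Ethernet length before transmission.
 * Note that in this version skb_pad() frees the original skb when it has
 * to reallocate, so the caller only deals with the returned pointer. The
 * constant 60 stands in for ETH_ZLEN, whose header is not included here.
 */
#if 0
static struct sk_buff *example_pad_to_min(struct sk_buff *skb)
{
	if (skb->len < 60)
		skb = skb_pad(skb, 60 - skb->len);	/* NULL on OOM */
	return skb;
}
#endif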

/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
 * If realloc == 0 and trimming is impossible without changing data,
 * it is a BUG().
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i = 0; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;
		if (end > len) {
			if (skb_cloned(skb)) {
				if (!realloc)
					BUG();
				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					return -ENOMEM;
			}
			if (len <= offset) {
				put_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb)->nr_frags--;
			} else {
				skb_shinfo(skb)->frags[i].size = len - offset;
			}
		}
		offset = end;
	}

	if (offset < len) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		if (len <= skb_headlen(skb)) {
			skb->len = len;
			skb->data_len = 0;
			skb->tail = skb->data + len;
			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
				skb_drop_fraglist(skb);
		} else {
			skb->data_len -= skb->len - len;
			skb->len = len;
		}
	}

	return 0;
}

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (!list)
				BUG();

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb->tail;
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
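
/*
 * Illustrative sketch (not part of the original file): skb_copy_bits()
 * is the safe way to read data that may live in page fragments or in a
 * frag list. Here a hypothetical caller extracts 8 bytes at some offset
 * into a local buffer without assuming the data is linear.
 */
#if 0
static int example_peek_bytes(const struct sk_buff *skb, int offset)
{
	u8 buf[8];

	if (skb_copy_bits(skb, offset, buf, sizeof(buf)))
		return -EFAULT;		/* offset/len out of range */
	/* ... inspect buf ... */
	return 0;
}
#endif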

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(skb->data + offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset,
			  int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();

	return csum;
}

/* Both of above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb_headlen(skb);

	if (csstart > skb_headlen(skb))
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
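
/*
 * Illustrative sketch (not part of the original file): a simple FIFO
 * built from an sk_buff_head. skb_queue_tail() and skb_dequeue() take
 * the list lock internally, so producer and consumer need no extra
 * locking of their own. The queue variable and function names are
 * hypothetical.
 */
#if 0
static struct sk_buff_head example_queue;

static void example_queue_init(void)
{
	skb_queue_head_init(&example_queue);
}

static void example_producer(struct sk_buff *skb)
{
	skb_queue_tail(&example_queue, skb);	/* enqueue at tail */
}

static void example_consumer(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_queue)) != NULL)
		kfree_skb(skb);			/* process, then drop */
}
#endif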

/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 *	skb_insert - insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

#if 0
/*
 *	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb->tail = skb->data + len;
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move the whole frag to the second
				 *    part, if possible. E.g. this approach
				 *    is mandatory for TUX, where splitting
				 *    is expensive.
				 * 2. Split accurately, which is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost of zerocopy sequential
 *       reads of potentially non-linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->root_skb == st->cur_skb &&
		   skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		goto next_skb;
	}

	return 0;
}

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the sequential read was aborted, i.e. if
 * skb_seq_read() was not called until it returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
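
/*
 * Illustrative sketch (not part of the original file): the intended
 * calling pattern for the sequential reader. Each skb_seq_read() call
 * returns one contiguous block; the loop below simply walks the whole
 * payload. skb_abort_seq_read() would be needed only if the walk
 * stopped before skb_seq_read() returned 0. The scan function name is
 * hypothetical.
 */
#if 0
static void example_scan(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len, consumed = 0;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* ... examine data[0..len-1] ... */
		consumed += len;
	}
	/* Loop ran to completion (returned 0), so no abort is needed. */
}
#endif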

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	return textsearch_find(config, state);
}
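
/*
 * Illustrative sketch (not part of the original file): searching an skb
 * payload for a byte pattern. It assumes textsearch_prepare() and
 * textsearch_destroy() from <linux/textsearch.h>, with "kmp" naming the
 * Knuth-Morris-Pratt algorithm module. The pattern and function name are
 * hypothetical.
 */
#if 0
static unsigned int example_find_marker(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "MARKER", 6, GFP_ATOMIC,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;		/* UINT_MAX if not found */
}
#endif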

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");
	do_gettimeofday(&skb_tv_base);
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_tv_base);