net/core/skbuff.c
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct sk_buff *skb = (struct sk_buff *) buf->private;

	kfree_skb(skb);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	struct sk_buff *skb = (struct sk_buff *) buf->private;

	skb_get(skb);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * See comment in sk_buff definition, just before the 'tail' member
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
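
/*
 * Example usage (illustrative sketch only, not part of the original
 * file; "payload" and "payload_len" are assumed caller-side names).
 * Most callers go through the alloc_skb() wrapper rather than calling
 * __alloc_skb() directly:
 *
 *	struct sk_buff *skb = alloc_skb(128 + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, 128);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */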

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
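
/*
 * Example (illustrative sketch only): kfree_skb() pairs with the
 * reference taken by skb_get(). The buffer is only released once
 * skb->users drops to zero:
 *
 *	struct sk_buff *extra = skb_get(skb);	users is now 2
 *	kfree_skb(skb);				users drops to 1, skb lives
 *	kfree_skb(extra);			users drops to 0, skb is freed
 */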

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum_start = old->csum_start;
	new->csum_offset = old->csum_offset;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	skb_copy_secmark(new, old);
}

static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(iif);
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
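
/*
 * Example (illustrative sketch only; deliver_to_tap() is a
 * hypothetical second consumer): clone when the same packet data must
 * be handed to two places. The clone shares the data, so neither copy
 * may write to it without first obtaining a private copy (see
 * skb_copy()/pskb_copy()):
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *	if (nskb)
 *		deliver_to_tap(nskb);
 */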

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed. In the latter case, &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	skb->mac_header += off;
	skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
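
/*
 * Example (illustrative sketch only; "needed" is an assumed value):
 * expand the headroom, then reload any cached header pointers, since
 * skb->head may have moved:
 *
 *	if (pskb_expand_head(skb, needed, 0, GFP_ATOMIC))
 *		goto drop;
 *	iph = ip_hdr(skb);	reload, the old pointer may be stale
 */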

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}


/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	n->mac_header += off;
#endif

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
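
/*
 * Example (illustrative sketch only; hlen, dlen, data and
 * build_header() are assumed caller-side names): the usual pattern for
 * composing a packet with the functions above is reserve, put, then
 * push:
 *
 *	skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			leave headroom
 *	memcpy(skb_put(skb, dlen), data, dlen);	append payload
 *	build_header(skb_push(skb, hlen));	prepend header into headroom
 */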

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting further
	 * bloat of the skb head and crucifying ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;

	kfree_skb(skb);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
				unsigned int len, unsigned int offset,
				struct sk_buff *skb)
{
	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
		return 1;

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
	spd->nr_pages++;
	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. Returns number of
 * pages mapped.
 */
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
			     unsigned int *total_len,
			     struct splice_pipe_desc *spd)
{
	unsigned int nr_pages = spd->nr_pages;
	unsigned int poff, plen, len, toff, tlen;
	int headlen, seg;

	toff = *offset;
	tlen = *total_len;
	if (!tlen)
		goto err;

	/*
	 * if the offset is greater than the linear part, go directly to
	 * the fragments.
	 */
	headlen = skb_headlen(skb);
	if (toff >= headlen) {
		toff -= headlen;
		goto map_frag;
	}

	/*
	 * first map the linear region into the pages/partial map, skipping
	 * any potential initial offset.
	 */
	len = 0;
	while (len < headlen) {
		void *p = skb->data + len;

		poff = (unsigned long) p & (PAGE_SIZE - 1);
		plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff);
		len += plen;

		if (toff) {
			if (plen <= toff) {
				toff -= plen;
				continue;
			}
			plen -= toff;
			poff += toff;
			toff = 0;
		}

		plen = min(plen, tlen);
		if (!plen)
			break;

		/*
		 * just jump directly to update and return, no point
		 * in going over fragments when the output is full.
		 */
		if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb))
			goto done;

		tlen -= plen;
	}

	/*
	 * then map the fragments
	 */
map_frag:
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		plen = f->size;
		poff = f->page_offset;

		if (toff) {
			if (plen <= toff) {
				toff -= plen;
				continue;
			}
			plen -= toff;
			poff += toff;
			toff = 0;
		}

		plen = min(plen, tlen);
		if (!plen)
			break;

		if (spd_fill_page(spd, f->page, plen, poff, skb))
			break;

		tlen -= plen;
	}

done:
	if (spd->nr_pages - nr_pages) {
		*offset = 0;
		*total_len = tlen;
		return 0;
	}
err:
	return 1;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *skb;

	/*
	 * I'd love to avoid the clone here, but tcp_read_sock()
	 * ignores reference counts and unconditionally kills the sk_buff
	 * on return from the actor.
	 */
	skb = skb_clone(__skb, GFP_KERNEL);
	if (unlikely(!skb))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list && tlen; list = list->next) {
			if (__skb_splice_bits(list, &offset, &tlen, &spd))
				break;
		}
	}

done:
	/*
	 * drop our reference to the clone, the pipe consumption will
	 * drop the rest.
	 */
	kfree_skb(skb);

	if (spd.nr_pages) {
		int ret;

		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(__skb->sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(__skb->sk);
		return ret;
	}

	return 0;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split it accurately. This is what
				 *    we do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
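
/*
 * Example (illustrative sketch only; "skb1" is assumed to be a freshly
 * allocated buffer holding no data of its own, and "mss" an assumed
 * split point): after the call, skb holds the first mss bytes and skb1
 * the remainder:
 *
 *	skb_split(skb, skb1, mss);
 */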
1962
1963 /**
1964 * skb_prepare_seq_read - Prepare a sequential read of skb data
1965 * @skb: the buffer to read
1966 * @from: lower offset of data to be read
1967 * @to: upper offset of data to be read
1968 * @st: state variable
1969 *
1970 * Initializes the specified state variable. Must be called before
1971 * invoking skb_seq_read() for the first time.
1972 */
1973 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1974 unsigned int to, struct skb_seq_state *st)
1975 {
1976 st->lower_offset = from;
1977 st->upper_offset = to;
1978 st->root_skb = st->cur_skb = skb;
1979 st->frag_idx = st->stepped_offset = 0;
1980 st->frag_data = NULL;
1981 }
1982
1983 /**
1984 * skb_seq_read - Sequentially read skb data
1985 * @consumed: number of bytes consumed by the caller so far
1986 * @data: destination pointer for data to be returned
1987 * @st: state variable
1988 *
1989 * Reads a block of skb data at &consumed relative to the
1990 * lower offset specified to skb_prepare_seq_read(). Assigns
1991 * the head of the data block to &data and returns the length
1992 * of the block or 0 if the end of the skb data or the upper
1993 * offset has been reached.
1994 *
1995 * The caller is not required to consume all of the data
1996 * returned, i.e. &consumed is typically set to the number
1997 * of bytes already consumed and the next call to
1998 * skb_seq_read() will return the remaining part of the block.
1999 *
2000 * Note 1: The size of each block of data returned can be arbitary,
2001 * this limitation is the cost for zerocopy seqeuental
2002 * reads of potentially non linear data.
2003 *
2004 * Note 2: Fragment lists within fragments are not implemented
2005 * at the moment, state->root_skb could be replaced with
2006 * a stack for this purpose.
2007 */
2008 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2009 struct skb_seq_state *st)
2010 {
2011 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2012 skb_frag_t *frag;
2013
2014 if (unlikely(abs_offset >= st->upper_offset))
2015 return 0;
2016
2017 next_skb:
2018 block_limit = skb_headlen(st->cur_skb);
2019
2020 if (abs_offset < block_limit) {
2021 *data = st->cur_skb->data + abs_offset;
2022 return block_limit - abs_offset;
2023 }
2024
2025 if (st->frag_idx == 0 && !st->frag_data)
2026 st->stepped_offset += skb_headlen(st->cur_skb);
2027
2028 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2029 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2030 block_limit = frag->size + st->stepped_offset;
2031
2032 if (abs_offset < block_limit) {
2033 if (!st->frag_data)
2034 st->frag_data = kmap_skb_frag(frag);
2035
2036 *data = (u8 *) st->frag_data + frag->page_offset +
2037 (abs_offset - st->stepped_offset);
2038
2039 return block_limit - abs_offset;
2040 }
2041
2042 if (st->frag_data) {
2043 kunmap_skb_frag(st->frag_data);
2044 st->frag_data = NULL;
2045 }
2046
2047 st->frag_idx++;
2048 st->stepped_offset += frag->size;
2049 }
2050
2051 if (st->frag_data) {
2052 kunmap_skb_frag(st->frag_data);
2053 st->frag_data = NULL;
2054 }
2055
2056 if (st->cur_skb->next) {
2057 st->cur_skb = st->cur_skb->next;
2058 st->frag_idx = 0;
2059 goto next_skb;
2060 } else if (st->root_skb == st->cur_skb &&
2061 skb_shinfo(st->root_skb)->frag_list) {
2062 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2063 goto next_skb;
2064 }
2065
2066 return 0;
2067 }
2068
2069 /**
2070 * skb_abort_seq_read - Abort a sequential read of skb data
2071 * @st: state variable
2072 *
 2073	 * Must be called if a sequential read is abandoned before
 2074	 * skb_seq_read() has returned 0, so that any mapped fragment is released.
2075 */
2076 void skb_abort_seq_read(struct skb_seq_state *st)
2077 {
2078 if (st->frag_data)
2079 kunmap_skb_frag(st->frag_data);
2080 }
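
/*
 * Example (illustrative sketch, not part of the original file): walking
 * all data in an skb with the sequential read API above. process() is a
 * hypothetical consumer of each returned block.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);
 *		consumed += len;
 *	}
 *
 * No skb_abort_seq_read() is needed here because the loop ran until
 * skb_seq_read() returned 0; had it broken out early, the abort call
 * would be required to release any still-mapped fragment.
 */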
2081
2082 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2083
2084 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2085 struct ts_config *conf,
2086 struct ts_state *state)
2087 {
2088 return skb_seq_read(offset, text, TS_SKB_CB(state));
2089 }
2090
2091 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2092 {
2093 skb_abort_seq_read(TS_SKB_CB(state));
2094 }
2095
2096 /**
2097 * skb_find_text - Find a text pattern in skb data
2098 * @skb: the buffer to look in
2099 * @from: search offset
2100 * @to: search limit
2101 * @config: textsearch configuration
2102 * @state: uninitialized textsearch state variable
2103 *
2104 * Finds a pattern in the skb data according to the specified
2105 * textsearch configuration. Use textsearch_next() to retrieve
2106 * subsequent occurrences of the pattern. Returns the offset
2107 * to the first occurrence or UINT_MAX if no match was found.
2108 */
2109 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2110 unsigned int to, struct ts_config *config,
2111 struct ts_state *state)
2112 {
2113 unsigned int ret;
2114
2115 config->get_next_block = skb_ts_get_next_block;
2116 config->finish = skb_ts_finish;
2117
2118 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2119
2120 ret = textsearch_find(config, state);
2121 return (ret <= to - from ? ret : UINT_MAX);
2122 }
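
/*
 * Example (illustrative sketch, not part of the original file): scanning
 * a packet for a fixed pattern using the textsearch infrastructure.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	textsearch_destroy(conf);
 *	return pos != UINT_MAX;
 */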
2123
2124 /**
 2125	 * skb_append_datato_frags - append the user data to a skb
 2126	 * @sk: sock structure
 2127	 * @skb: skb structure to be appended with user data.
2128 * @getfrag: call back function to be used for getting the user data
2129 * @from: pointer to user message iov
2130 * @length: length of the iov message
2131 *
 2132	 * Description: This function appends the user data to the fragment part
 2133	 * of the skb. If any page allocation fails, it returns -ENOMEM.
2134 */
2135 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2136 int (*getfrag)(void *from, char *to, int offset,
2137 int len, int odd, struct sk_buff *skb),
2138 void *from, int length)
2139 {
2140 int frg_cnt = 0;
2141 skb_frag_t *frag = NULL;
2142 struct page *page = NULL;
2143 int copy, left;
2144 int offset = 0;
2145 int ret;
2146
2147 do {
2148 /* Return error if we don't have space for new frag */
2149 frg_cnt = skb_shinfo(skb)->nr_frags;
2150 if (frg_cnt >= MAX_SKB_FRAGS)
2151 return -EFAULT;
2152
2153 /* allocate a new page for next frag */
2154 page = alloc_pages(sk->sk_allocation, 0);
2155
 2156		/* If alloc_pages() fails, just return failure; the caller
 2157		 * will free the previously allocated pages via kfree_skb().
 2158		 */
2159 if (page == NULL)
2160 return -ENOMEM;
2161
2162 /* initialize the next frag */
2163 sk->sk_sndmsg_page = page;
2164 sk->sk_sndmsg_off = 0;
2165 skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2166 skb->truesize += PAGE_SIZE;
2167 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2168
2169 /* get the new initialized frag */
2170 frg_cnt = skb_shinfo(skb)->nr_frags;
2171 frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2172
2173 /* copy the user data to page */
2174 left = PAGE_SIZE - frag->page_offset;
2175 copy = (length > left)? left : length;
2176
2177 ret = getfrag(from, (page_address(frag->page) +
2178 frag->page_offset + frag->size),
2179 offset, copy, 0, skb);
2180 if (ret < 0)
2181 return -EFAULT;
2182
2183 /* copy was successful so update the size parameters */
2184 sk->sk_sndmsg_off += copy;
2185 frag->size += copy;
2186 skb->len += copy;
2187 skb->data_len += copy;
2188 offset += copy;
2189 length -= copy;
2190
2191 } while (length > 0);
2192
2193 return 0;
2194 }
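
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * getfrag() callback that copies from a plain kernel buffer, matching the
 * callback signature expected by skb_append_datato_frags(). buf/buflen
 * are hypothetical caller variables.
 *
 *	static int kernel_getfrag(void *from, char *to, int offset,
 *				  int len, int odd, struct sk_buff *skb)
 *	{
 *		memcpy(to, (char *)from + offset, len);
 *		return 0;
 *	}
 *
 *	err = skb_append_datato_frags(sk, skb, kernel_getfrag, buf, buflen);
 */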
2195
2196 /**
2197 * skb_pull_rcsum - pull skb and update receive checksum
2198 * @skb: buffer to update
2199 * @len: length of data pulled
2200 *
2201 * This function performs an skb_pull on the packet and updates
 2202	 * the CHECKSUM_COMPLETE checksum. It should be used on the
 2203	 * receive path instead of skb_pull() unless you know
2204 * that the checksum difference is zero (e.g., a valid IP header)
2205 * or you are setting ip_summed to CHECKSUM_NONE.
2206 */
2207 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2208 {
2209 BUG_ON(len > skb->len);
2210 skb->len -= len;
2211 BUG_ON(skb->len < skb->data_len);
2212 skb_postpull_rcsum(skb, skb->data, len);
2213 return skb->data += len;
2214 }
2215
2216 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
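
/*
 * Example (illustrative sketch, not part of the original file): pulling a
 * 4-byte encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value consistent, much as VLAN handling does.
 *
 *	if (unlikely(!pskb_may_pull(skb, 4)))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);
 */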
2217
2218 /**
2219 * skb_segment - Perform protocol segmentation on skb.
2220 * @skb: buffer to segment
2221 * @features: features for the output path (see dev->features)
2222 *
 2223	 * This function performs segmentation on the given skb. It returns
 2224	 * a pointer to the first in a list of new skbs for the segments.
 2225	 * In case of error it returns ERR_PTR(err).
2226 */
2227 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2228 {
2229 struct sk_buff *segs = NULL;
2230 struct sk_buff *tail = NULL;
2231 unsigned int mss = skb_shinfo(skb)->gso_size;
2232 unsigned int doffset = skb->data - skb_mac_header(skb);
2233 unsigned int offset = doffset;
2234 unsigned int headroom;
2235 unsigned int len;
2236 int sg = features & NETIF_F_SG;
2237 int nfrags = skb_shinfo(skb)->nr_frags;
2238 int err = -ENOMEM;
2239 int i = 0;
2240 int pos;
2241
2242 __skb_push(skb, doffset);
2243 headroom = skb_headroom(skb);
2244 pos = skb_headlen(skb);
2245
2246 do {
2247 struct sk_buff *nskb;
2248 skb_frag_t *frag;
2249 int hsize;
2250 int k;
2251 int size;
2252
2253 len = skb->len - offset;
2254 if (len > mss)
2255 len = mss;
2256
2257 hsize = skb_headlen(skb) - offset;
2258 if (hsize < 0)
2259 hsize = 0;
2260 if (hsize > len || !sg)
2261 hsize = len;
2262
2263 nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
2264 if (unlikely(!nskb))
2265 goto err;
2266
2267 if (segs)
2268 tail->next = nskb;
2269 else
2270 segs = nskb;
2271 tail = nskb;
2272
2273 nskb->dev = skb->dev;
2274 skb_copy_queue_mapping(nskb, skb);
2275 nskb->priority = skb->priority;
2276 nskb->protocol = skb->protocol;
2277 nskb->dst = dst_clone(skb->dst);
2278 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
2279 nskb->pkt_type = skb->pkt_type;
2280 nskb->mac_len = skb->mac_len;
2281
2282 skb_reserve(nskb, headroom);
2283 skb_reset_mac_header(nskb);
2284 skb_set_network_header(nskb, skb->mac_len);
2285 nskb->transport_header = (nskb->network_header +
2286 skb_network_header_len(skb));
2287 skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
2288 doffset);
2289 if (!sg) {
2290 nskb->csum = skb_copy_and_csum_bits(skb, offset,
2291 skb_put(nskb, len),
2292 len, 0);
2293 continue;
2294 }
2295
2296 frag = skb_shinfo(nskb)->frags;
2297 k = 0;
2298
2299 nskb->ip_summed = CHECKSUM_PARTIAL;
2300 nskb->csum = skb->csum;
2301 skb_copy_from_linear_data_offset(skb, offset,
2302 skb_put(nskb, hsize), hsize);
2303
2304 while (pos < offset + len) {
2305 BUG_ON(i >= nfrags);
2306
2307 *frag = skb_shinfo(skb)->frags[i];
2308 get_page(frag->page);
2309 size = frag->size;
2310
2311 if (pos < offset) {
2312 frag->page_offset += offset - pos;
2313 frag->size -= offset - pos;
2314 }
2315
2316 k++;
2317
2318 if (pos + size <= offset + len) {
2319 i++;
2320 pos += size;
2321 } else {
2322 frag->size -= pos + size - (offset + len);
2323 break;
2324 }
2325
2326 frag++;
2327 }
2328
2329 skb_shinfo(nskb)->nr_frags = k;
2330 nskb->data_len = len - hsize;
2331 nskb->len += nskb->data_len;
2332 nskb->truesize += nskb->data_len;
2333 } while ((offset += len) < skb->len);
2334
2335 return segs;
2336
2337 err:
2338 while ((skb = segs)) {
2339 segs = skb->next;
2340 kfree_skb(skb);
2341 }
2342 return ERR_PTR(err);
2343 }
2344
2345 EXPORT_SYMBOL_GPL(skb_segment);
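
/*
 * Example (illustrative sketch, not part of the original file): how a
 * GSO-aware caller might consume the list built by skb_segment();
 * xmit_one() is a hypothetical transmit helper.
 *
 *	struct sk_buff *segs = skb_segment(skb, dev->features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = nskb->next;
 *		nskb->next = NULL;
 *		xmit_one(nskb);
 *	}
 */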
2346
2347 void __init skb_init(void)
2348 {
2349 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2350 sizeof(struct sk_buff),
2351 0,
2352 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2353 NULL);
2354 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2355 (2*sizeof(struct sk_buff)) +
2356 sizeof(atomic_t),
2357 0,
2358 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2359 NULL);
2360 }
2361
2362 /**
2363 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2364 * @skb: Socket buffer containing the buffers to be mapped
2365 * @sg: The scatter-gather list to map into
2366 * @offset: The offset into the buffer's contents to start mapping
2367 * @len: Length of buffer space to be mapped
2368 *
2369 * Fill the specified scatter-gather list with mappings/pointers into a
2370 * region of the buffer space attached to a socket buffer.
2371 */
2372 static int
2373 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2374 {
2375 int start = skb_headlen(skb);
2376 int i, copy = start - offset;
2377 int elt = 0;
2378
2379 if (copy > 0) {
2380 if (copy > len)
2381 copy = len;
2382 sg_set_buf(sg, skb->data + offset, copy);
2383 elt++;
2384 if ((len -= copy) == 0)
2385 return elt;
2386 offset += copy;
2387 }
2388
2389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2390 int end;
2391
2392 BUG_TRAP(start <= offset + len);
2393
2394 end = start + skb_shinfo(skb)->frags[i].size;
2395 if ((copy = end - offset) > 0) {
2396 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2397
2398 if (copy > len)
2399 copy = len;
2400 sg_set_page(&sg[elt], frag->page, copy,
2401 frag->page_offset+offset-start);
2402 elt++;
2403 if (!(len -= copy))
2404 return elt;
2405 offset += copy;
2406 }
2407 start = end;
2408 }
2409
2410 if (skb_shinfo(skb)->frag_list) {
2411 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2412
2413 for (; list; list = list->next) {
2414 int end;
2415
2416 BUG_TRAP(start <= offset + len);
2417
2418 end = start + list->len;
2419 if ((copy = end - offset) > 0) {
2420 if (copy > len)
2421 copy = len;
2422 elt += __skb_to_sgvec(list, sg+elt, offset - start,
2423 copy);
2424 if ((len -= copy) == 0)
2425 return elt;
2426 offset += copy;
2427 }
2428 start = end;
2429 }
2430 }
2431 BUG_ON(len);
2432 return elt;
2433 }
2434
2435 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2436 {
2437 int nsg = __skb_to_sgvec(skb, sg, offset, len);
2438
2439 sg_mark_end(&sg[nsg - 1]);
2440
2441 return nsg;
2442 }
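
/*
 * Example (illustrative sketch, not part of the original file): mapping an
 * skb's payload into a scatterlist for a crypto operation, roughly as the
 * IPsec input path does. MAX_SG is a hypothetical bound on the number of
 * scatterlist entries.
 *
 *	struct scatterlist sg[MAX_SG];
 *	int nsg;
 *
 *	sg_init_table(sg, MAX_SG);
 *	nsg = skb_to_sgvec(skb, sg, offset, payload_len);
 */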
2443
2444 /**
2445 * skb_cow_data - Check that a socket buffer's data buffers are writable
2446 * @skb: The socket buffer to check.
2447 * @tailbits: Amount of trailing space to be added
2448 * @trailer: Returned pointer to the skb where the @tailbits space begins
2449 *
2450 * Make sure that the data buffers attached to a socket buffer are
2451 * writable. If they are not, private copies are made of the data buffers
2452 * and the socket buffer is set to use these instead.
2453 *
2454 * If @tailbits is given, make sure that there is space to write @tailbits
2455 * bytes of data beyond current end of socket buffer. @trailer will be
2456 * set to point to the skb in which this space begins.
2457 *
2458 * The number of scatterlist elements required to completely map the
2459 * COW'd and extended socket buffer will be returned.
2460 */
2461 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2462 {
2463 int copyflag;
2464 int elt;
2465 struct sk_buff *skb1, **skb_p;
2466
2467 /* If skb is cloned or its head is paged, reallocate
2468 * head pulling out all the pages (pages are considered not writable
2469 * at the moment even if they are anonymous).
2470 */
2471 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2472 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2473 return -ENOMEM;
2474
 2475	/* Easy case. Most packets will go this way. */
2476 if (!skb_shinfo(skb)->frag_list) {
 2477		/* A little trouble: not enough space for the trailer.
 2478		 * This should not happen when the stack is tuned to generate
 2479		 * good frames. On a miss we reallocate and reserve even more
 2480		 * space; 128 bytes is fair. */
2481
2482 if (skb_tailroom(skb) < tailbits &&
2483 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2484 return -ENOMEM;
2485
2486 /* Voila! */
2487 *trailer = skb;
2488 return 1;
2489 }
2490
 2491	/* Misery. We are in trouble; we have to mince the fragments... */
2492
2493 elt = 1;
2494 skb_p = &skb_shinfo(skb)->frag_list;
2495 copyflag = 0;
2496
2497 while ((skb1 = *skb_p) != NULL) {
2498 int ntail = 0;
2499
 2500		/* A fragment can be partially pulled by someone;
 2501		 * this can happen on input. Copy it and everything
 2502		 * after it. */
2503
2504 if (skb_shared(skb1))
2505 copyflag = 1;
2506
2507 /* If the skb is the last, worry about trailer. */
2508
2509 if (skb1->next == NULL && tailbits) {
2510 if (skb_shinfo(skb1)->nr_frags ||
2511 skb_shinfo(skb1)->frag_list ||
2512 skb_tailroom(skb1) < tailbits)
2513 ntail = tailbits + 128;
2514 }
2515
2516 if (copyflag ||
2517 skb_cloned(skb1) ||
2518 ntail ||
2519 skb_shinfo(skb1)->nr_frags ||
2520 skb_shinfo(skb1)->frag_list) {
2521 struct sk_buff *skb2;
2522
 2523			/* No way around it: we have to copy this fragment. */
2524 if (ntail == 0)
2525 skb2 = skb_copy(skb1, GFP_ATOMIC);
2526 else
2527 skb2 = skb_copy_expand(skb1,
2528 skb_headroom(skb1),
2529 ntail,
2530 GFP_ATOMIC);
2531 if (unlikely(skb2 == NULL))
2532 return -ENOMEM;
2533
2534 if (skb1->sk)
2535 skb_set_owner_w(skb2, skb1->sk);
2536
 2537			/* The copy succeeded: link the new skb
 2538			 * and drop the old one. */
2539
2540 skb2->next = skb1->next;
2541 *skb_p = skb2;
2542 kfree_skb(skb1);
2543 skb1 = skb2;
2544 }
2545 elt++;
2546 *trailer = skb1;
2547 skb_p = &skb1->next;
2548 }
2549
2550 return elt;
2551 }
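
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical IPsec-style call sequence, making buffers private and reserving
 * room for a trailer before building a scatterlist.
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, tailroom_needed, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *
 * On success every buffer is writable, trailer points at the skb holding
 * the tailroom_needed bytes of tailroom, and nfrags bounds the size of
 * the scatterlist needed to map the result.
 */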
2552
2553 /**
2554 * skb_partial_csum_set - set up and verify partial csum values for packet
2555 * @skb: the skb to set
2556 * @start: the number of bytes after skb->data to start checksumming.
2557 * @off: the offset from start to place the checksum.
2558 *
2559 * For untrusted partially-checksummed packets, we need to make sure the values
2560 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
2561 *
2562 * This function checks and sets those values and skb->ip_summed: if this
2563 * returns false you should drop the packet.
2564 */
2565 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
2566 {
2567 if (unlikely(start > skb->len - 2) ||
2568 unlikely((int)start + off > skb->len - 2)) {
2569 if (net_ratelimit())
2570 printk(KERN_WARNING
2571 "bad partial csum: csum=%u/%u len=%u\n",
2572 start, off, skb->len);
2573 return false;
2574 }
2575 skb->ip_summed = CHECKSUM_PARTIAL;
2576 skb->csum_start = skb_headroom(skb) + start;
2577 skb->csum_offset = off;
2578 return true;
2579 }
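
/*
 * Example (illustrative sketch, not part of the original file): validating
 * checksum metadata received from an untrusted source, as a virtio-style
 * driver might after reading start/off from a guest-supplied header.
 *
 *	if (!skb_partial_csum_set(skb, start, off)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */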
2580
2581 EXPORT_SYMBOL(___pskb_trim);
2582 EXPORT_SYMBOL(__kfree_skb);
2583 EXPORT_SYMBOL(kfree_skb);
2584 EXPORT_SYMBOL(__pskb_pull_tail);
2585 EXPORT_SYMBOL(__alloc_skb);
2586 EXPORT_SYMBOL(__netdev_alloc_skb);
2587 EXPORT_SYMBOL(pskb_copy);
2588 EXPORT_SYMBOL(pskb_expand_head);
2589 EXPORT_SYMBOL(skb_checksum);
2590 EXPORT_SYMBOL(skb_clone);
2591 EXPORT_SYMBOL(skb_copy);
2592 EXPORT_SYMBOL(skb_copy_and_csum_bits);
2593 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2594 EXPORT_SYMBOL(skb_copy_bits);
2595 EXPORT_SYMBOL(skb_copy_expand);
2596 EXPORT_SYMBOL(skb_over_panic);
2597 EXPORT_SYMBOL(skb_pad);
2598 EXPORT_SYMBOL(skb_realloc_headroom);
2599 EXPORT_SYMBOL(skb_under_panic);
2600 EXPORT_SYMBOL(skb_dequeue);
2601 EXPORT_SYMBOL(skb_dequeue_tail);
2602 EXPORT_SYMBOL(skb_insert);
2603 EXPORT_SYMBOL(skb_queue_purge);
2604 EXPORT_SYMBOL(skb_queue_head);
2605 EXPORT_SYMBOL(skb_queue_tail);
2606 EXPORT_SYMBOL(skb_unlink);
2607 EXPORT_SYMBOL(skb_append);
2608 EXPORT_SYMBOL(skb_split);
2609 EXPORT_SYMBOL(skb_prepare_seq_read);
2610 EXPORT_SYMBOL(skb_seq_read);
2611 EXPORT_SYMBOL(skb_abort_seq_read);
2612 EXPORT_SYMBOL(skb_find_text);
2613 EXPORT_SYMBOL(skb_append_datato_frags);
2614
2615 EXPORT_SYMBOL_GPL(skb_to_sgvec);
2616 EXPORT_SYMBOL_GPL(skb_cow_data);
2617 EXPORT_SYMBOL_GPL(skb_partial_csum_set);