/*
 *      SUCS NET3:
 *
 *      Generic datagram handling routines. These are generic for all
 *      protocols. Possibly a generic IP version on top of these would
 *      make sense. Not tonight however 8-).
 *      This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *      NetROM layer all have identical poll code and mostly
 *      identical recvmsg() code. So we share it here. The poll was
 *      shared before but buried in udp.c so I moved it.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *                      udp.c code)
 *
 *      Fixes:
 *              Alan Cox        :       NULL return from skb_peek_copy()
 *                                      understood
 *              Alan Cox        :       Rewrote skb_read_datagram to avoid the
 *                                      skb_peek_copy stuff.
 *              Alan Cox        :       Added support for SOCK_SEQPACKET.
 *                                      IPX can no longer use the SO_TYPE hack
 *                                      but AX.25 now works right, and SPX is
 *                                      feasible.
 *              Alan Cox        :       Fixed write poll of non IP protocol
 *                                      crash.
 *              Florian La Roche:       Changed for my new skbuff handling.
 *              Darryl Miles    :       Fixed non-blocking SOCK_SEQPACKET.
 *              Linus Torvalds  :       BSD semantic fixes.
 *              Alan Cox        :       Datagram iovec handling
 *              Darryl Miles    :       Fixed non-blocking SOCK_STREAM.
 *              Alan Cox        :       POSIXisms
 *              Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *      Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /*
         * Avoid a wakeup if event not interesting for us
         */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
                                 const struct sk_buff *skb)
{
        int error;
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (sk->sk_receive_queue.prev != skb)
                goto out;

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        error = 0;
        *timeo_p = schedule_timeout(*timeo_p);
out:
        finish_wait(sk_sleep(sk), &wait);
        return error;
interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
        goto out;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}

/**
 *      __skb_recv_datagram - Receive a datagram skbuff
 *      @sk: socket
 *      @flags: MSG_ flags
 *      @peeked: returns non-zero if this packet has been seen before
 *      @off: an offset in bytes to peek skb from. Returns an offset
 *            within an skb where data actually starts
 *      @err: error code returned
 *
 *      Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *      and possible races. This replaces identical code in packet, raw and
 *      udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *      the long standing peek and read race for datagram sockets. If you
 *      alter this routine remember it must be re-entrant.
 *
 *      This function will lock the socket if a skb is returned, so the caller
 *      needs to unlock the socket in that case (usually by calling
 *      skb_free_datagram)
 *
 *      * It does not lock socket since today. This function is
 *      * free of race conditions. This measure should/can improve
 *      * significantly datagram socket latencies at high loads,
 *      * when data copying to user space takes lots of time.
 *      * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *      * 8) Great win.)
 *      *                                           --ANK (980729)
 *
 *      The order of the tests when we find no data waiting is specified
 *      quite explicitly by POSIX 1003.1g, don't change them without having
 *      the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    int *peeked, int *off, int *err)
{
        struct sk_buff *skb, *last;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
         */
        int error = sock_error(sk);

        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Again only user level code calls this function, so nothing
                 * interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                unsigned long cpu_flags;
                struct sk_buff_head *queue = &sk->sk_receive_queue;
                int _off = *off;

                last = (struct sk_buff *)queue;
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb_queue_walk(queue, skb) {
                        last = skb;
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
                                if (_off >= skb->len && (skb->len || _off ||
                                                         skb->peeked)) {
                                        _off -= skb->len;
                                        continue;
                                }
                                skb->peeked = 1;
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, queue);

                        spin_unlock_irqrestore(&queue->lock, cpu_flags);
                        *off = _off;
                        return skb;
                }
                spin_unlock_irqrestore(&queue->lock, cpu_flags);

                if (sk_can_busy_loop(sk) &&
                    sk_busy_loop(sk, flags & MSG_DONTWAIT))
                        continue;

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (!wait_for_more_packets(sk, err, &timeo, last));

        return NULL;

no_packet:
        *err = error;
        return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
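
/*
 * Illustrative sketch (not part of this file): using the MSG_PEEK/offset
 * contract of __skb_recv_datagram() to look at the next queued datagram
 * without dequeueing it.  The helper name "peek_next_datagram" is
 * hypothetical; on return *off holds the offset inside the skb where the
 * data starts, and the extra reference must be dropped with
 * skb_free_datagram() once the caller is done.
 *
 *      static struct sk_buff *peek_next_datagram(struct sock *sk,
 *                                                int *off, int *err)
 *      {
 *              int peeked;
 *
 *              return __skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT,
 *                                         &peeked, off, err);
 *      }
 */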

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
                                  int noblock, int *err)
{
        int peeked, off = 0;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
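
/*
 * Illustrative sketch (not part of this file): a minimal recvmsg()
 * built on skb_recv_datagram(), roughly the pattern shared by UDP,
 * AX.25, IPX and friends.  The function name "example_recvmsg" and its
 * exact signature are hypothetical.
 *
 *      static int example_recvmsg(struct sock *sk, struct msghdr *msg,
 *                                 size_t len, int noblock, int flags)
 *      {
 *              struct sk_buff *skb;
 *              int err, copied;
 *
 *              skb = skb_recv_datagram(sk, flags, noblock, &err);
 *              if (!skb)
 *                      return err;
 *
 *              copied = skb->len;
 *              if (copied > len) {
 *                      copied = len;
 *                      msg->msg_flags |= MSG_TRUNC;
 *              }
 *              err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *              skb_free_datagram(sk, skb);
 *              return err ? err : copied;
 *      }
 */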

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
        bool slow;

        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;

        slow = lock_sock_fast(sk);
        skb_orphan(skb);
        sk_mem_reclaim_partial(sk);
        unlock_sock_fast(sk, slow);

        /* skb is now orphaned, can be freed outside of locked section */
        __kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
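
/*
 * Usage note (illustrative, not from this file): callers that already
 * hold the socket lock when they drop a received skb use
 * skb_free_datagram(), while lockless receive paths such as UDP's
 * recvmsg use skb_free_datagram_locked(), which takes the socket lock
 * only around the memory-accounting part and frees the orphaned skb
 * outside of it, e.g.:
 *
 *      skb = __skb_recv_datagram(sk, flags, &peeked, &off, &err);
 *      ... copy data out ...
 *      skb_free_datagram_locked(sk, skb);
 */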

/**
 *      skb_kill_datagram - Free a datagram skbuff forcibly
 *      @sk: socket
 *      @skb: datagram skbuff
 *      @flags: MSG_ flags
 *
 *      This function frees a datagram skbuff that was received by
 *      skb_recv_datagram.  The flags argument must match the one
 *      used for skb_recv_datagram.
 *
 *      If the MSG_PEEK flag is set, and the packet is still on the
 *      receive queue of the socket, it will be taken off the queue
 *      before it is freed.
 *
 *      This function currently only disables BH when acquiring the
 *      sk_receive_queue lock.  Therefore it must not be used in a
 *      context where that lock is acquired in an IRQ context.
 *
 *      It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = 0;

        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }

        kfree_skb(skb);
        atomic_inc(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);

        return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
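
/*
 * Illustrative sketch (not part of this file): a recvmsg error path that
 * drops the datagram it could not deliver.  Passing the same flags back
 * lets skb_kill_datagram() unlink the skb from the receive queue when it
 * was only peeked at.
 *
 *      err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *      if (err) {
 *              skb_kill_datagram(sk, skb, flags);
 *              return err;
 *      }
 */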

/**
 *      skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying from
 *      @to: io vector to copy to
 *      @len: amount of data to copy from buffer to iovec
 *
 *      Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_iovec(frag_iter,
                                                    offset - start,
                                                    to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);
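
/*
 * Illustrative sketch (not part of this file): copying only the payload
 * of a received datagram, skipping a transport header of "hlen" bytes
 * that the caller has already parsed.  "hlen" is hypothetical here.
 *
 *      if (skb_copy_datagram_iovec(skb, hlen, msg->msg_iov,
 *                                  skb->len - hlen))
 *              return -EFAULT;
 */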

/**
 *      skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying from
 *      @to: io vector to copy to
 *      @to_offset: offset in the io vector to start copying to
 *      @len: amount of data to copy from buffer to iovec
 *
 *      Returns 0 or -EFAULT.
 *      Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
                                  const struct iovec *to, int to_offset,
                                  int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovecend(to, vaddr + frag->page_offset +
                                                offset - start, to_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_const_iovec(frag_iter,
                                                          offset - start,
                                                          to, to_offset,
                                                          copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
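
/*
 * Usage note (illustrative, not from this file): the _const_iovec variant
 * is for callers that must keep the iovec intact, e.g. tun/vhost style
 * code that copies one skb to user memory but may reuse the same vector,
 * so it passes an explicit to_offset instead of advancing the iovec:
 *
 *      err = skb_copy_datagram_const_iovec(skb, 0, iov, hdr_len, skb->len);
 */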

/**
 *      skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *      @skb: buffer to copy
 *      @offset: offset in the buffer to start copying to
 *      @from: io vector to copy from
 *      @from_offset: offset in the io vector to start copying from
 *      @len: amount of data to copy to buffer from iovec
 *
 *      Returns 0 or -EFAULT.
 *      Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
                                        copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                from_offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_fromiovecend(vaddr + frag->page_offset +
                                                  offset - start,
                                                  from, from_offset, copy);
                        kunmap(page);
                        if (err)
                                goto fault;

                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_datagram_from_iovec(frag_iter,
                                                         offset - start,
                                                         from,
                                                         from_offset,
                                                         copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        from_offset += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
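
/*
 * Illustrative sketch (not part of this file): filling a freshly
 * allocated skb with user data on a transmit path, the way tun/packet
 * style code does.  "hdr_len" and the error handling are schematic.
 *
 *      skb = sock_alloc_send_skb(sk, len, noblock, &err);
 *      if (!skb)
 *              return err;
 *      skb_put(skb, len);
 *      if (skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, hdr_len, len)) {
 *              kfree_skb(skb);
 *              return -EFAULT;
 *      }
 */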

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                int err = 0;
                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
                        u8 *vaddr;
                        struct page *page = skb_frag_page(frag);

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
                                                      frag->page_offset +
                                                      offset - start,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        __wsum csum2 = 0;
                        if (copy > len)
                                copy = len;
                        if (skb_copy_and_csum_datagram(frag_iter,
                                                       offset - start,
                                                       to, copy,
                                                       &csum2))
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        __sum16 sum;

        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
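
/*
 * Illustrative sketch (not part of this file): forcing checksum
 * verification before handing a datagram to a path that cannot recover
 * from a bad packet.  A zero return means the checksum is good (and
 * ip_summed has been upgraded to CHECKSUM_UNNECESSARY).
 *
 *      if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
 *          __skb_checksum_complete(skb)) {
 *              skb_kill_datagram(sk, skb, flags);
 *              return -EAGAIN;
 *      }
 */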

/**
 *      skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *      @skb: skbuff
 *      @hlen: hardware length
 *      @iov: io vector
 *
 *      Caller _must_ check that skb will fit to this iovec.
 *
 *      Returns: 0       - success.
 *               -EINVAL - checksum failure.
 *               -EFAULT - fault during copy. Beware, in this case iovec
 *                         can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                     int hlen, struct iovec *iov)
{
        __wsum csum;
        int chunk = skb->len - hlen;

        if (!chunk)
                return 0;

        /* Skip filled elements.
         * Pretty silly, look at memcpy_toiovec, though 8)
         */
        while (!iov->iov_len)
                iov++;

        if (iov->iov_len < chunk) {
                if (__skb_checksum_complete(skb))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                                               chunk, &csum))
                        goto fault;
                if (csum_fold(csum))
                        goto csum_error;
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;
csum_error:
        return -EINVAL;
fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
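
/*
 * Illustrative sketch (not part of this file): the classic UDP receive
 * pattern, where "hlen" is the transport header already covered by the
 * stack's own checksum handling (sizeof(struct udphdr) for UDP):
 *
 *      err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
 *                                             msg->msg_iov);
 *      if (err == -EINVAL)
 *              goto csum_copy_err;
 */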

/**
 *      datagram_poll - generic datagram poll
 *      @file: file struct
 *      @sock: socket
 *      @wait: poll table
 *
 *      Datagram poll: Again totally generic. This also handles
 *      sequenced packet sockets provided the socket receive queue
 *      is only ever holding data ready to receive.
 *
 *      Note: when you _don't_ use this routine for this protocol,
 *      and you use a different write policy from sock_writeable()
 *      then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}
EXPORT_SYMBOL(datagram_poll);
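
/*
 * Illustrative sketch (not part of this file): a datagram protocol simply
 * points its proto_ops at this helper instead of rolling its own poll.
 * The names "example_dgram_ops", "PF_EXAMPLE" and "example_recvmsg" are
 * hypothetical; only a subset of the ops table is shown.
 *
 *      static const struct proto_ops example_dgram_ops = {
 *              .family  = PF_EXAMPLE,
 *              .poll    = datagram_poll,
 *              .recvmsg = example_recvmsg,
 *      };
 */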