net/packet/af_packet.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PACKET - implements raw packet sockets.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 *
12 * Fixes:
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
47 *
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
52 *
53 */
54
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
78 #include <asm/page.h>
79 #include <asm/cacheflush.h>
80 #include <asm/io.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
92 #ifdef CONFIG_INET
93 #include <net/inet_common.h>
94 #endif
95
96 #include "internal.h"
97
98 /*
99 Assumptions:
100 - if device has no dev->hard_header routine, it adds and removes ll header
101 inside itself. In this case the ll header is invisible outside the device,
102 but higher levels should still reserve dev->hard_header_len.
103 Some devices are clever enough to reallocate the skb when the header
104 will not fit into the reserved space (tunnels); others are silly
105 (PPP).
106 - a packet socket receives packets with the ll header pulled,
107 so SOCK_RAW should push it back.
108
109 On receive:
110 -----------
111
112 Incoming, dev->hard_header!=NULL
113 mac_header -> ll header
114 data -> data
115
116 Outgoing, dev->hard_header!=NULL
117 mac_header -> ll header
118 data -> ll header
119
120 Incoming, dev->hard_header==NULL
121 mac_header -> UNKNOWN position. It very likely points to the ll
122 header. PPP does this, which is wrong, because it introduces
123 asymmetry between the rx and tx paths.
124 data -> data
125
126 Outgoing, dev->hard_header==NULL
127 mac_header -> data. ll header is still not built!
128 data -> data
129
130 Summary
131 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
132
133
134 On transmit:
135 ------------
136
137 dev->hard_header != NULL
138 mac_header -> ll header
139 data -> ll header
140
141 dev->hard_header == NULL (ll header is added by device, we cannot control it)
142 mac_header -> data
143 data -> data
144
145 We should set nh.raw on output to the correct position;
146 the packet classifier depends on it.
147 */
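/* Illustrative user-space sketch of the two socket flavours the conventions
 * above are written for (standard packet(7) API, error handling omitted):
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgr = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *
 *	recv(raw, buf, sizeof(buf), 0);	// buf begins with the ll (e.g. Ethernet) header
 *	recv(dgr, buf, sizeof(buf), 0);	// ll header already removed, payload only
 */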
148
149 /* Private packet socket structures. */
150
151 /* identical to struct packet_mreq except it has
152 * a longer address field.
153 */
154 struct packet_mreq_max {
155 int mr_ifindex;
156 unsigned short mr_type;
157 unsigned short mr_alen;
158 unsigned char mr_address[MAX_ADDR_LEN];
159 };
160
161 union tpacket_uhdr {
162 struct tpacket_hdr *h1;
163 struct tpacket2_hdr *h2;
164 struct tpacket3_hdr *h3;
165 void *raw;
166 };
167
168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169 int closing, int tx_ring);
170
171 #define V3_ALIGNMENT (8)
172
173 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
174
175 #define BLK_PLUS_PRIV(sz_of_priv) \
176 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
177
178 #define PGV_FROM_VMALLOC 1
179
180 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
181 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
182 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
183 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
184 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
185 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
186 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
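/* Worked example (illustrative): with V3_ALIGNMENT == 8, a user-requested
 * tp_sizeof_priv of 13 bytes is padded to ALIGN(13, 8) == 16, so
 * BLK_PLUS_PRIV(13) == BLK_HDR_LEN + 16.  That is where the first frame of
 * each block starts (BLOCK_O2FP), with the private area sitting between the
 * block header (BLOCK_O2PRIV == BLK_HDR_LEN) and the first frame.
 */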
187
188 struct packet_sock;
189 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
190 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
191 struct packet_type *pt, struct net_device *orig_dev);
192
193 static void *packet_previous_frame(struct packet_sock *po,
194 struct packet_ring_buffer *rb,
195 int status);
196 static void packet_increment_head(struct packet_ring_buffer *buff);
197 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
198 struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200 struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202 struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205 struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(unsigned long);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_init_blk_timer(struct packet_sock *,
209 struct tpacket_kbdq_core *,
210 void (*func) (unsigned long));
211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
212 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
213 struct tpacket3_hdr *);
214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
215 struct tpacket3_hdr *);
216 static void packet_flush_mclist(struct sock *sk);
217
218 struct packet_skb_cb {
219 union {
220 struct sockaddr_pkt pkt;
221 union {
222 /* Trick: alias skb original length with
223 * ll.sll_family and ll.protocol in order
224 * to save room.
225 */
226 unsigned int origlen;
227 struct sockaddr_ll ll;
228 };
229 } sa;
230 };
231
232 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
233
234 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid) \
236 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
238 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241 ((x)->kactive_blk_num+1) : 0)
242
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245
246 static int packet_direct_xmit(struct sk_buff *skb)
247 {
248 struct net_device *dev = skb->dev;
249 netdev_features_t features;
250 struct netdev_queue *txq;
251 int ret = NETDEV_TX_BUSY;
252
253 if (unlikely(!netif_running(dev) ||
254 !netif_carrier_ok(dev)))
255 goto drop;
256
257 features = netif_skb_features(skb);
258 if (skb_needs_linearize(skb, features) &&
259 __skb_linearize(skb))
260 goto drop;
261
262 txq = skb_get_tx_queue(dev, skb);
263
264 local_bh_disable();
265
266 HARD_TX_LOCK(dev, txq, smp_processor_id());
267 if (!netif_xmit_frozen_or_drv_stopped(txq))
268 ret = netdev_start_xmit(skb, dev, txq, false);
269 HARD_TX_UNLOCK(dev, txq);
270
271 local_bh_enable();
272
273 if (!dev_xmit_complete(ret))
274 kfree_skb(skb);
275
276 return ret;
277 drop:
278 atomic_long_inc(&dev->tx_dropped);
279 kfree_skb(skb);
280 return NET_XMIT_DROP;
281 }
282
283 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
284 {
285 struct net_device *dev;
286
287 rcu_read_lock();
288 dev = rcu_dereference(po->cached_dev);
289 if (likely(dev))
290 dev_hold(dev);
291 rcu_read_unlock();
292
293 return dev;
294 }
295
296 static void packet_cached_dev_assign(struct packet_sock *po,
297 struct net_device *dev)
298 {
299 rcu_assign_pointer(po->cached_dev, dev);
300 }
301
302 static void packet_cached_dev_reset(struct packet_sock *po)
303 {
304 RCU_INIT_POINTER(po->cached_dev, NULL);
305 }
306
307 static bool packet_use_direct_xmit(const struct packet_sock *po)
308 {
309 return po->xmit == packet_direct_xmit;
310 }
311
312 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
313 {
314 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
315 }
316
317 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
318 {
319 const struct net_device_ops *ops = dev->netdev_ops;
320 u16 queue_index;
321
322 if (ops->ndo_select_queue) {
323 queue_index = ops->ndo_select_queue(dev, skb, NULL,
324 __packet_pick_tx_queue);
325 queue_index = netdev_cap_txqueue(dev, queue_index);
326 } else {
327 queue_index = __packet_pick_tx_queue(dev, skb);
328 }
329
330 skb_set_queue_mapping(skb, queue_index);
331 }
332
333 /* register_prot_hook must be invoked with the po->bind_lock held,
334 * or from a context in which asynchronous accesses to the packet
335 * socket is not possible (packet_create()).
336 */
337 static void register_prot_hook(struct sock *sk)
338 {
339 struct packet_sock *po = pkt_sk(sk);
340
341 if (!po->running) {
342 if (po->fanout)
343 __fanout_link(sk, po);
344 else
345 dev_add_pack(&po->prot_hook);
346
347 sock_hold(sk);
348 po->running = 1;
349 }
350 }
351
352 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
353 * held. If the sync parameter is true, we will temporarily drop
354 * the po->bind_lock and do a synchronize_net to make sure no
355 * asynchronous packet processing paths still refer to the elements
356 * of po->prot_hook. If the sync parameter is false, it is the
357 * caller's responsibility to take care of this.
358 */
359 static void __unregister_prot_hook(struct sock *sk, bool sync)
360 {
361 struct packet_sock *po = pkt_sk(sk);
362
363 po->running = 0;
364
365 if (po->fanout)
366 __fanout_unlink(sk, po);
367 else
368 __dev_remove_pack(&po->prot_hook);
369
370 __sock_put(sk);
371
372 if (sync) {
373 spin_unlock(&po->bind_lock);
374 synchronize_net();
375 spin_lock(&po->bind_lock);
376 }
377 }
378
379 static void unregister_prot_hook(struct sock *sk, bool sync)
380 {
381 struct packet_sock *po = pkt_sk(sk);
382
383 if (po->running)
384 __unregister_prot_hook(sk, sync);
385 }
386
387 static inline struct page * __pure pgv_to_page(void *addr)
388 {
389 if (is_vmalloc_addr(addr))
390 return vmalloc_to_page(addr);
391 return virt_to_page(addr);
392 }
393
394 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
395 {
396 union tpacket_uhdr h;
397
398 h.raw = frame;
399 switch (po->tp_version) {
400 case TPACKET_V1:
401 h.h1->tp_status = status;
402 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
403 break;
404 case TPACKET_V2:
405 h.h2->tp_status = status;
406 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
407 break;
408 case TPACKET_V3:
409 default:
410 WARN(1, "TPACKET version not supported.\n");
411 BUG();
412 }
413
414 smp_wmb();
415 }
416
417 static int __packet_get_status(struct packet_sock *po, void *frame)
418 {
419 union tpacket_uhdr h;
420
421 smp_rmb();
422
423 h.raw = frame;
424 switch (po->tp_version) {
425 case TPACKET_V1:
426 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
427 return h.h1->tp_status;
428 case TPACKET_V2:
429 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
430 return h.h2->tp_status;
431 case TPACKET_V3:
432 default:
433 WARN(1, "TPACKET version not supported.\n");
434 BUG();
435 return 0;
436 }
437 }
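/* User-space side of the status handshake implemented above (illustrative
 * sketch, assuming a TPACKET_V2 RX ring mapped with mmap(); see the
 * packet_mmap documentation for the full protocol):
 *
 *	struct tpacket2_hdr *hdr = frame;	// a frame inside the mmap'ed ring
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait until the kernel has filled it
 *	// ... consume the frame ...
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back to the kernel
 */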
438
439 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
440 unsigned int flags)
441 {
442 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
443
444 if (shhwtstamps &&
445 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
446 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
447 return TP_STATUS_TS_RAW_HARDWARE;
448
449 if (ktime_to_timespec_cond(skb->tstamp, ts))
450 return TP_STATUS_TS_SOFTWARE;
451
452 return 0;
453 }
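/* The tp_tstamp flags consulted above come from the PACKET_TIMESTAMP socket
 * option (illustrative sketch):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * With that flag set, a valid NIC hardware timestamp is preferred and
 * reported as TP_STATUS_TS_RAW_HARDWARE; otherwise the software timestamp
 * in skb->tstamp is used and reported as TP_STATUS_TS_SOFTWARE.
 */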
454
455 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
456 struct sk_buff *skb)
457 {
458 union tpacket_uhdr h;
459 struct timespec ts;
460 __u32 ts_status;
461
462 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
463 return 0;
464
465 h.raw = frame;
466 switch (po->tp_version) {
467 case TPACKET_V1:
468 h.h1->tp_sec = ts.tv_sec;
469 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
470 break;
471 case TPACKET_V2:
472 h.h2->tp_sec = ts.tv_sec;
473 h.h2->tp_nsec = ts.tv_nsec;
474 break;
475 case TPACKET_V3:
476 default:
477 WARN(1, "TPACKET version not supported.\n");
478 BUG();
479 }
480
481 /* one flush is safe, as both fields always lie on the same cacheline */
482 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
483 smp_wmb();
484
485 return ts_status;
486 }
487
488 static void *packet_lookup_frame(struct packet_sock *po,
489 struct packet_ring_buffer *rb,
490 unsigned int position,
491 int status)
492 {
493 unsigned int pg_vec_pos, frame_offset;
494 union tpacket_uhdr h;
495
496 pg_vec_pos = position / rb->frames_per_block;
497 frame_offset = position % rb->frames_per_block;
498
499 h.raw = rb->pg_vec[pg_vec_pos].buffer +
500 (frame_offset * rb->frame_size);
501
502 if (status != __packet_get_status(po, h.raw))
503 return NULL;
504
505 return h.raw;
506 }
507
508 static void *packet_current_frame(struct packet_sock *po,
509 struct packet_ring_buffer *rb,
510 int status)
511 {
512 return packet_lookup_frame(po, rb, rb->head, status);
513 }
514
515 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
516 {
517 del_timer_sync(&pkc->retire_blk_timer);
518 }
519
520 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
521 int tx_ring,
522 struct sk_buff_head *rb_queue)
523 {
524 struct tpacket_kbdq_core *pkc;
525
526 pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
527 GET_PBDQC_FROM_RB(&po->rx_ring);
528
529 spin_lock_bh(&rb_queue->lock);
530 pkc->delete_blk_timer = 1;
531 spin_unlock_bh(&rb_queue->lock);
532
533 prb_del_retire_blk_timer(pkc);
534 }
535
536 static void prb_init_blk_timer(struct packet_sock *po,
537 struct tpacket_kbdq_core *pkc,
538 void (*func) (unsigned long))
539 {
540 init_timer(&pkc->retire_blk_timer);
541 pkc->retire_blk_timer.data = (long)po;
542 pkc->retire_blk_timer.function = func;
543 pkc->retire_blk_timer.expires = jiffies;
544 }
545
546 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
547 {
548 struct tpacket_kbdq_core *pkc;
549
550 if (tx_ring)
551 BUG();
552
553 pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
554 GET_PBDQC_FROM_RB(&po->rx_ring);
555 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
556 }
557
558 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
559 int blk_size_in_bytes)
560 {
561 struct net_device *dev;
562 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
563 struct ethtool_cmd ecmd;
564 int err;
565 u32 speed;
566
567 rtnl_lock();
568 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
569 if (unlikely(!dev)) {
570 rtnl_unlock();
571 return DEFAULT_PRB_RETIRE_TOV;
572 }
573 err = __ethtool_get_settings(dev, &ecmd);
574 speed = ethtool_cmd_speed(&ecmd);
575 rtnl_unlock();
576 if (!err) {
577 /*
578 * If the link speed is this slow, you don't really
579 * need to worry about perf anyway.
580 */
581 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
582 return DEFAULT_PRB_RETIRE_TOV;
583 } else {
584 msec = 1;
585 div = speed / 1000;
586 }
587 }
588
589 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
590
591 if (div)
592 mbits /= div;
593
594 tmo = mbits * msec;
595
596 if (div)
597 return tmo+1;
598 return tmo;
599 }
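/* Worked example of the computation above (illustrative): for a 1 MiB block,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8.  On a 1 Gb/s link, div = 1 and
 * msec = 1, so tmo = 8 and the function returns 9 ms.  On a 10 Gb/s link,
 * div = 10 and 8 / 10 truncates to 0, so it returns 1 ms.  Links slower than
 * 1 Gb/s (or with unknown speed) simply get DEFAULT_PRB_RETIRE_TOV.
 */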
600
601 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
602 union tpacket_req_u *req_u)
603 {
604 p1->feature_req_word = req_u->req3.tp_feature_req_word;
605 }
606
607 static void init_prb_bdqc(struct packet_sock *po,
608 struct packet_ring_buffer *rb,
609 struct pgv *pg_vec,
610 union tpacket_req_u *req_u, int tx_ring)
611 {
612 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
613 struct tpacket_block_desc *pbd;
614
615 memset(p1, 0x0, sizeof(*p1));
616
617 p1->knxt_seq_num = 1;
618 p1->pkbdq = pg_vec;
619 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
620 p1->pkblk_start = pg_vec[0].buffer;
621 p1->kblk_size = req_u->req3.tp_block_size;
622 p1->knum_blocks = req_u->req3.tp_block_nr;
623 p1->hdrlen = po->tp_hdrlen;
624 p1->version = po->tp_version;
625 p1->last_kactive_blk_num = 0;
626 po->stats.stats3.tp_freeze_q_cnt = 0;
627 if (req_u->req3.tp_retire_blk_tov)
628 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
629 else
630 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
631 req_u->req3.tp_block_size);
632 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
633 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
634
635 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
636 prb_init_ft_ops(p1, req_u);
637 prb_setup_retire_blk_timer(po, tx_ring);
638 prb_open_block(p1, pbd);
639 }
640
641 /* Do NOT update the last_blk_num first.
642 * Assumes sk_buff_head lock is held.
643 */
644 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
645 {
646 mod_timer(&pkc->retire_blk_timer,
647 jiffies + pkc->tov_in_jiffies);
648 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
649 }
650
651 /*
652 * Timer logic:
653 * 1) We refresh the timer only when we open a block.
654 * By doing this we don't waste cycles refreshing the timer
655 * on a packet-by-packet basis.
656 *
657 * With a 1MB block-size, on a 1Gbps line, it will take
658 * i) ~8 ms to fill a block + ii) memcpy etc.
659 * In this cut we are not accounting for the memcpy time.
660 *
661 * So, if the user sets the 'tmo' to 10ms then the timer
662 * will never fire while the block is still getting filled
663 * (which is what we want). However, the user could choose
664 * to close a block early and that's fine.
665 *
666 * But when the timer does fire, we check whether or not to refresh it.
667 * Since the tmo granularity is in msecs, it is not too expensive
668 * to refresh the timer, let's say every '8' msecs.
669 * Either the user can set the 'tmo' or we can derive it based on
670 * a) line-speed and b) block-size.
671 * prb_calc_retire_blk_tmo() calculates the tmo.
672 *
673 */
674 static void prb_retire_rx_blk_timer_expired(unsigned long data)
675 {
676 struct packet_sock *po = (struct packet_sock *)data;
677 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
678 unsigned int frozen;
679 struct tpacket_block_desc *pbd;
680
681 spin_lock(&po->sk.sk_receive_queue.lock);
682
683 frozen = prb_queue_frozen(pkc);
684 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
685
686 if (unlikely(pkc->delete_blk_timer))
687 goto out;
688
689 /* We only need to plug the race when the block is partially filled.
690 * tpacket_rcv:
691 * lock(); increment BLOCK_NUM_PKTS; unlock()
692 * copy_bits() is in progress ...
693 * timer fires on another cpu:
694 * we can't retire the current block because copy_bits
695 * is in progress.
696 *
697 */
698 if (BLOCK_NUM_PKTS(pbd)) {
699 while (atomic_read(&pkc->blk_fill_in_prog)) {
700 /* Waiting for skb_copy_bits to finish... */
701 cpu_relax();
702 }
703 }
704
705 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
706 if (!frozen) {
707 if (!BLOCK_NUM_PKTS(pbd)) {
708 /* An empty block. Just refresh the timer. */
709 goto refresh_timer;
710 }
711 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
712 if (!prb_dispatch_next_block(pkc, po))
713 goto refresh_timer;
714 else
715 goto out;
716 } else {
717 /* Case 1. Queue was frozen because user-space was
718 * lagging behind.
719 */
720 if (prb_curr_blk_in_use(pkc, pbd)) {
721 /*
722 * Ok, user-space is still behind.
723 * So just refresh the timer.
724 */
725 goto refresh_timer;
726 } else {
727 * Case 2. The queue was frozen, user-space caught up,
728 * now the link went idle && the timer fired.
729 * We don't have a block to close. So we open this
730 * block and restart the timer.
731 * Opening a block thaws the queue and restarts the timer.
732 * Thawing/timer-refresh is a side effect.
733 */
734 prb_open_block(pkc, pbd);
735 goto out;
736 }
737 }
738 }
739
740 refresh_timer:
741 _prb_refresh_rx_retire_blk_timer(pkc);
742
743 out:
744 spin_unlock(&po->sk.sk_receive_queue.lock);
745 }
746
747 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
748 struct tpacket_block_desc *pbd1, __u32 status)
749 {
750 /* Flush everything minus the block header */
751
752 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
753 u8 *start, *end;
754
755 start = (u8 *)pbd1;
756
757 /* Skip the block header (we know the header WILL fit in 4K) */
758 start += PAGE_SIZE;
759
760 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
761 for (; start < end; start += PAGE_SIZE)
762 flush_dcache_page(pgv_to_page(start));
763
764 smp_wmb();
765 #endif
766
767 /* Now update the block status. */
768
769 BLOCK_STATUS(pbd1) = status;
770
771 /* Flush the block header */
772
773 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
774 start = (u8 *)pbd1;
775 flush_dcache_page(pgv_to_page(start));
776
777 smp_wmb();
778 #endif
779 }
780
781 /*
782 * Side effect:
783 *
784 * 1) flush the block
785 * 2) Increment active_blk_num
786 *
787 * Note: We DON'T refresh the timer on purpose,
788 * because almost always the next block will be opened.
789 */
790 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
791 struct tpacket_block_desc *pbd1,
792 struct packet_sock *po, unsigned int stat)
793 {
794 __u32 status = TP_STATUS_USER | stat;
795
796 struct tpacket3_hdr *last_pkt;
797 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
798 struct sock *sk = &po->sk;
799
800 if (po->stats.stats3.tp_drops)
801 status |= TP_STATUS_LOSING;
802
803 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
804 last_pkt->tp_next_offset = 0;
805
806 /* Get the ts of the last pkt */
807 if (BLOCK_NUM_PKTS(pbd1)) {
808 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
809 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
810 } else {
811 /* Ok, we tmo'd - so get the current time.
812 *
813 * It shouldn't really happen as we don't close empty
814 * blocks. See prb_retire_rx_blk_timer_expired().
815 */
816 struct timespec ts;
817 getnstimeofday(&ts);
818 h1->ts_last_pkt.ts_sec = ts.tv_sec;
819 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
820 }
821
822 smp_wmb();
823
824 /* Flush the block */
825 prb_flush_block(pkc1, pbd1, status);
826
827 sk->sk_data_ready(sk);
828
829 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
830 }
831
832 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
833 {
834 pkc->reset_pending_on_curr_blk = 0;
835 }
836
837 /*
838 * Side effect of opening a block:
839 *
840 * 1) prb_queue is thawed.
841 * 2) retire_blk_timer is refreshed.
842 *
843 */
844 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
845 struct tpacket_block_desc *pbd1)
846 {
847 struct timespec ts;
848 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
849
850 smp_rmb();
851
852 /* We could have just memset this, but we would lose the
853 * flexibility of making the priv area sticky
854 */
855
856 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
857 BLOCK_NUM_PKTS(pbd1) = 0;
858 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
859
860 getnstimeofday(&ts);
861
862 h1->ts_first_pkt.ts_sec = ts.tv_sec;
863 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
864
865 pkc1->pkblk_start = (char *)pbd1;
866 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
867
868 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
869 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
870
871 pbd1->version = pkc1->version;
872 pkc1->prev = pkc1->nxt_offset;
873 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
874
875 prb_thaw_queue(pkc1);
876 _prb_refresh_rx_retire_blk_timer(pkc1);
877
878 smp_wmb();
879 }
880
881 /*
882 * Queue freeze logic:
883 * 1) Assume tp_block_nr = 8 blocks.
884 * 2) At time 't0', user opens Rx ring.
885 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
886 * 4) user-space is either sleeping or processing block '0'.
887 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
888 * it will close block-7, loop around and try to fill block '0'.
889 * call-flow:
890 * __packet_lookup_frame_in_block
891 * prb_retire_current_block()
892 * prb_dispatch_next_block()
893 * |->(BLOCK_STATUS == USER) evaluates to true
894 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
895 * 6) Now there are two cases:
896 * 6.1) Link goes idle right after the queue is frozen.
897 * But remember, the last open_block() refreshed the timer.
898 * When this timer expires, it will refresh itself so that we can
899 * re-open block-0 in the near future.
900 * 6.2) Link is busy and keeps on receiving packets. This is a simple
901 * case and __packet_lookup_frame_in_block will check if block-0
902 * is free and can now be re-used.
903 */
904 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
905 struct packet_sock *po)
906 {
907 pkc->reset_pending_on_curr_blk = 1;
908 po->stats.stats3.tp_freeze_q_cnt++;
909 }
910
911 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
912
913 /*
914 * If the next block is free then we will dispatch it
915 * and return a good offset.
916 * Else, we will freeze the queue.
917 * So, caller must check the return value.
918 */
919 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
920 struct packet_sock *po)
921 {
922 struct tpacket_block_desc *pbd;
923
924 smp_rmb();
925
926 /* 1. Get current block num */
927 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
928
929 /* 2. If this block is currently in_use then freeze the queue */
930 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
931 prb_freeze_queue(pkc, po);
932 return NULL;
933 }
934
935 /*
936 * 3.
937 * open this block and return the offset where the first packet
938 * needs to get stored.
939 */
940 prb_open_block(pkc, pbd);
941 return (void *)pkc->nxt_offset;
942 }
943
944 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
945 struct packet_sock *po, unsigned int status)
946 {
947 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
948
949 /* retire/close the current block */
950 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
951 /*
952 * Plug the case where copy_bits() is in progress on
953 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
954 * have space to copy the pkt in the current block and
955 * called prb_retire_current_block()
956 *
957 * We don't need to worry about the TMO case because
958 * the timer-handler already handled this case.
959 */
960 if (!(status & TP_STATUS_BLK_TMO)) {
961 while (atomic_read(&pkc->blk_fill_in_prog)) {
962 /* Waiting for skb_copy_bits to finish... */
963 cpu_relax();
964 }
965 }
966 prb_close_block(pkc, pbd, po, status);
967 return;
968 }
969 }
970
971 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
972 struct tpacket_block_desc *pbd)
973 {
974 return TP_STATUS_USER & BLOCK_STATUS(pbd);
975 }
976
977 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
978 {
979 return pkc->reset_pending_on_curr_blk;
980 }
981
982 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
983 {
984 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
985 atomic_dec(&pkc->blk_fill_in_prog);
986 }
987
988 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
989 struct tpacket3_hdr *ppd)
990 {
991 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
992 }
993
994 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
995 struct tpacket3_hdr *ppd)
996 {
997 ppd->hv1.tp_rxhash = 0;
998 }
999
1000 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1001 struct tpacket3_hdr *ppd)
1002 {
1003 if (skb_vlan_tag_present(pkc->skb)) {
1004 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1005 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1006 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1007 } else {
1008 ppd->hv1.tp_vlan_tci = 0;
1009 ppd->hv1.tp_vlan_tpid = 0;
1010 ppd->tp_status = TP_STATUS_AVAILABLE;
1011 }
1012 }
1013
1014 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1015 struct tpacket3_hdr *ppd)
1016 {
1017 ppd->hv1.tp_padding = 0;
1018 prb_fill_vlan_info(pkc, ppd);
1019
1020 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1021 prb_fill_rxhash(pkc, ppd);
1022 else
1023 prb_clear_rxhash(pkc, ppd);
1024 }
1025
1026 static void prb_fill_curr_block(char *curr,
1027 struct tpacket_kbdq_core *pkc,
1028 struct tpacket_block_desc *pbd,
1029 unsigned int len)
1030 {
1031 struct tpacket3_hdr *ppd;
1032
1033 ppd = (struct tpacket3_hdr *)curr;
1034 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1035 pkc->prev = curr;
1036 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1037 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1038 BLOCK_NUM_PKTS(pbd) += 1;
1039 atomic_inc(&pkc->blk_fill_in_prog);
1040 prb_run_all_ft_ops(pkc, ppd);
1041 }
1042
1043 /* Assumes caller has the sk->rx_queue.lock */
1044 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1045 struct sk_buff *skb,
1046 int status,
1047 unsigned int len
1048 )
1049 {
1050 struct tpacket_kbdq_core *pkc;
1051 struct tpacket_block_desc *pbd;
1052 char *curr, *end;
1053
1054 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1055 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1056
1057 /* Queue is frozen when user space is lagging behind */
1058 if (prb_queue_frozen(pkc)) {
1059 /*
1060 * Check if the last block, which caused the queue to freeze,
1061 * is still in_use by user-space.
1062 */
1063 if (prb_curr_blk_in_use(pkc, pbd)) {
1064 /* Can't record this packet */
1065 return NULL;
1066 } else {
1067 /*
1068 * Ok, the block was released by user-space.
1069 * Now let's open that block.
1070 * opening a block also thaws the queue.
1071 * Thawing is a side effect.
1072 */
1073 prb_open_block(pkc, pbd);
1074 }
1075 }
1076
1077 smp_mb();
1078 curr = pkc->nxt_offset;
1079 pkc->skb = skb;
1080 end = (char *)pbd + pkc->kblk_size;
1081
1082 /* first try the current block */
1083 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1084 prb_fill_curr_block(curr, pkc, pbd, len);
1085 return (void *)curr;
1086 }
1087
1088 /* Ok, close the current block */
1089 prb_retire_current_block(pkc, po, 0);
1090
1091 /* Now, try to dispatch the next block */
1092 curr = (char *)prb_dispatch_next_block(pkc, po);
1093 if (curr) {
1094 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1095 prb_fill_curr_block(curr, pkc, pbd, len);
1096 return (void *)curr;
1097 }
1098
1099 /*
1100 * No free blocks are available. user_space hasn't caught up yet.
1101 * Queue was just frozen and now this packet will get dropped.
1102 */
1103 return NULL;
1104 }
1105
1106 static void *packet_current_rx_frame(struct packet_sock *po,
1107 struct sk_buff *skb,
1108 int status, unsigned int len)
1109 {
1110 char *curr = NULL;
1111 switch (po->tp_version) {
1112 case TPACKET_V1:
1113 case TPACKET_V2:
1114 curr = packet_lookup_frame(po, &po->rx_ring,
1115 po->rx_ring.head, status);
1116 return curr;
1117 case TPACKET_V3:
1118 return __packet_lookup_frame_in_block(po, skb, status, len);
1119 default:
1120 WARN(1, "TPACKET version not supported\n");
1121 BUG();
1122 return NULL;
1123 }
1124 }
1125
1126 static void *prb_lookup_block(struct packet_sock *po,
1127 struct packet_ring_buffer *rb,
1128 unsigned int idx,
1129 int status)
1130 {
1131 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1132 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1133
1134 if (status != BLOCK_STATUS(pbd))
1135 return NULL;
1136 return pbd;
1137 }
1138
1139 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1140 {
1141 unsigned int prev;
1142 if (rb->prb_bdqc.kactive_blk_num)
1143 prev = rb->prb_bdqc.kactive_blk_num-1;
1144 else
1145 prev = rb->prb_bdqc.knum_blocks-1;
1146 return prev;
1147 }
1148
1149 /* Assumes caller has held the rx_queue.lock */
1150 static void *__prb_previous_block(struct packet_sock *po,
1151 struct packet_ring_buffer *rb,
1152 int status)
1153 {
1154 unsigned int previous = prb_previous_blk_num(rb);
1155 return prb_lookup_block(po, rb, previous, status);
1156 }
1157
1158 static void *packet_previous_rx_frame(struct packet_sock *po,
1159 struct packet_ring_buffer *rb,
1160 int status)
1161 {
1162 if (po->tp_version <= TPACKET_V2)
1163 return packet_previous_frame(po, rb, status);
1164
1165 return __prb_previous_block(po, rb, status);
1166 }
1167
1168 static void packet_increment_rx_head(struct packet_sock *po,
1169 struct packet_ring_buffer *rb)
1170 {
1171 switch (po->tp_version) {
1172 case TPACKET_V1:
1173 case TPACKET_V2:
1174 return packet_increment_head(rb);
1175 case TPACKET_V3:
1176 default:
1177 WARN(1, "TPACKET version not supported.\n");
1178 BUG();
1179 return;
1180 }
1181 }
1182
1183 static void *packet_previous_frame(struct packet_sock *po,
1184 struct packet_ring_buffer *rb,
1185 int status)
1186 {
1187 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1188 return packet_lookup_frame(po, rb, previous, status);
1189 }
1190
1191 static void packet_increment_head(struct packet_ring_buffer *buff)
1192 {
1193 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1194 }
1195
1196 static void packet_inc_pending(struct packet_ring_buffer *rb)
1197 {
1198 this_cpu_inc(*rb->pending_refcnt);
1199 }
1200
1201 static void packet_dec_pending(struct packet_ring_buffer *rb)
1202 {
1203 this_cpu_dec(*rb->pending_refcnt);
1204 }
1205
1206 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1207 {
1208 unsigned int refcnt = 0;
1209 int cpu;
1210
1211 /* We don't use pending refcount in rx_ring. */
1212 if (rb->pending_refcnt == NULL)
1213 return 0;
1214
1215 for_each_possible_cpu(cpu)
1216 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1217
1218 return refcnt;
1219 }
1220
1221 static int packet_alloc_pending(struct packet_sock *po)
1222 {
1223 po->rx_ring.pending_refcnt = NULL;
1224
1225 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1226 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1227 return -ENOBUFS;
1228
1229 return 0;
1230 }
1231
1232 static void packet_free_pending(struct packet_sock *po)
1233 {
1234 free_percpu(po->tx_ring.pending_refcnt);
1235 }
1236
1237 static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1238 {
1239 struct sock *sk = &po->sk;
1240 bool has_room;
1241
1242 if (po->prot_hook.func != tpacket_rcv)
1243 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1244 <= sk->sk_rcvbuf;
1245
1246 spin_lock(&sk->sk_receive_queue.lock);
1247 if (po->tp_version == TPACKET_V3)
1248 has_room = prb_lookup_block(po, &po->rx_ring,
1249 po->rx_ring.prb_bdqc.kactive_blk_num,
1250 TP_STATUS_KERNEL);
1251 else
1252 has_room = packet_lookup_frame(po, &po->rx_ring,
1253 po->rx_ring.head,
1254 TP_STATUS_KERNEL);
1255 spin_unlock(&sk->sk_receive_queue.lock);
1256
1257 return has_room;
1258 }
1259
1260 static void packet_sock_destruct(struct sock *sk)
1261 {
1262 skb_queue_purge(&sk->sk_error_queue);
1263
1264 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1265 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1266
1267 if (!sock_flag(sk, SOCK_DEAD)) {
1268 pr_err("Attempt to release alive packet socket: %p\n", sk);
1269 return;
1270 }
1271
1272 sk_refcnt_debug_dec(sk);
1273 }
1274
1275 static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1276 {
1277 int x = atomic_read(&f->rr_cur) + 1;
1278
1279 if (x >= num)
1280 x = 0;
1281
1282 return x;
1283 }
1284
1285 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1286 struct sk_buff *skb,
1287 unsigned int num)
1288 {
1289 return reciprocal_scale(skb_get_hash(skb), num);
1290 }
1291
1292 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1293 struct sk_buff *skb,
1294 unsigned int num)
1295 {
1296 int cur, old;
1297
1298 cur = atomic_read(&f->rr_cur);
1299 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1300 fanout_rr_next(f, num))) != cur)
1301 cur = old;
1302 return cur;
1303 }
1304
1305 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1306 struct sk_buff *skb,
1307 unsigned int num)
1308 {
1309 return smp_processor_id() % num;
1310 }
1311
1312 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1313 struct sk_buff *skb,
1314 unsigned int num)
1315 {
1316 return prandom_u32_max(num);
1317 }
1318
1319 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1320 struct sk_buff *skb,
1321 unsigned int idx, unsigned int skip,
1322 unsigned int num)
1323 {
1324 unsigned int i, j;
1325
1326 i = j = min_t(int, f->next[idx], num - 1);
1327 do {
1328 if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1329 if (i != j)
1330 f->next[idx] = i;
1331 return i;
1332 }
1333 if (++i == num)
1334 i = 0;
1335 } while (i != j);
1336
1337 return idx;
1338 }
1339
1340 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1341 struct sk_buff *skb,
1342 unsigned int num)
1343 {
1344 return skb_get_queue_mapping(skb) % num;
1345 }
1346
1347 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1348 {
1349 return f->flags & (flag >> 8);
1350 }
1351
1352 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1353 struct packet_type *pt, struct net_device *orig_dev)
1354 {
1355 struct packet_fanout *f = pt->af_packet_priv;
1356 unsigned int num = f->num_members;
1357 struct packet_sock *po;
1358 unsigned int idx;
1359
1360 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1361 !num) {
1362 kfree_skb(skb);
1363 return 0;
1364 }
1365
1366 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1367 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1368 if (!skb)
1369 return 0;
1370 }
1371 switch (f->type) {
1372 case PACKET_FANOUT_HASH:
1373 default:
1374 idx = fanout_demux_hash(f, skb, num);
1375 break;
1376 case PACKET_FANOUT_LB:
1377 idx = fanout_demux_lb(f, skb, num);
1378 break;
1379 case PACKET_FANOUT_CPU:
1380 idx = fanout_demux_cpu(f, skb, num);
1381 break;
1382 case PACKET_FANOUT_RND:
1383 idx = fanout_demux_rnd(f, skb, num);
1384 break;
1385 case PACKET_FANOUT_QM:
1386 idx = fanout_demux_qm(f, skb, num);
1387 break;
1388 case PACKET_FANOUT_ROLLOVER:
1389 idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
1390 break;
1391 }
1392
1393 po = pkt_sk(f->arr[idx]);
1394 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1395 unlikely(!packet_rcv_has_room(po, skb))) {
1396 idx = fanout_demux_rollover(f, skb, idx, idx, num);
1397 po = pkt_sk(f->arr[idx]);
1398 }
1399
1400 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1401 }
1402
1403 DEFINE_MUTEX(fanout_mutex);
1404 EXPORT_SYMBOL_GPL(fanout_mutex);
1405 static LIST_HEAD(fanout_list);
1406
1407 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1408 {
1409 struct packet_fanout *f = po->fanout;
1410
1411 spin_lock(&f->lock);
1412 f->arr[f->num_members] = sk;
1413 smp_wmb();
1414 f->num_members++;
1415 spin_unlock(&f->lock);
1416 }
1417
1418 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1419 {
1420 struct packet_fanout *f = po->fanout;
1421 int i;
1422
1423 spin_lock(&f->lock);
1424 for (i = 0; i < f->num_members; i++) {
1425 if (f->arr[i] == sk)
1426 break;
1427 }
1428 BUG_ON(i >= f->num_members);
1429 f->arr[i] = f->arr[f->num_members - 1];
1430 f->num_members--;
1431 spin_unlock(&f->lock);
1432 }
1433
1434 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1435 {
1436 if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1437 return true;
1438
1439 return false;
1440 }
1441
1442 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1443 {
1444 struct packet_sock *po = pkt_sk(sk);
1445 struct packet_fanout *f, *match;
1446 u8 type = type_flags & 0xff;
1447 u8 flags = type_flags >> 8;
1448 int err;
1449
1450 switch (type) {
1451 case PACKET_FANOUT_ROLLOVER:
1452 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1453 return -EINVAL;
1454 case PACKET_FANOUT_HASH:
1455 case PACKET_FANOUT_LB:
1456 case PACKET_FANOUT_CPU:
1457 case PACKET_FANOUT_RND:
1458 case PACKET_FANOUT_QM:
1459 break;
1460 default:
1461 return -EINVAL;
1462 }
1463
1464 if (!po->running)
1465 return -EINVAL;
1466
1467 if (po->fanout)
1468 return -EALREADY;
1469
1470 mutex_lock(&fanout_mutex);
1471 match = NULL;
1472 list_for_each_entry(f, &fanout_list, list) {
1473 if (f->id == id &&
1474 read_pnet(&f->net) == sock_net(sk)) {
1475 match = f;
1476 break;
1477 }
1478 }
1479 err = -EINVAL;
1480 if (match && match->flags != flags)
1481 goto out;
1482 if (!match) {
1483 err = -ENOMEM;
1484 match = kzalloc(sizeof(*match), GFP_KERNEL);
1485 if (!match)
1486 goto out;
1487 write_pnet(&match->net, sock_net(sk));
1488 match->id = id;
1489 match->type = type;
1490 match->flags = flags;
1491 atomic_set(&match->rr_cur, 0);
1492 INIT_LIST_HEAD(&match->list);
1493 spin_lock_init(&match->lock);
1494 atomic_set(&match->sk_ref, 0);
1495 match->prot_hook.type = po->prot_hook.type;
1496 match->prot_hook.dev = po->prot_hook.dev;
1497 match->prot_hook.func = packet_rcv_fanout;
1498 match->prot_hook.af_packet_priv = match;
1499 match->prot_hook.id_match = match_fanout_group;
1500 dev_add_pack(&match->prot_hook);
1501 list_add(&match->list, &fanout_list);
1502 }
1503 err = -EINVAL;
1504 if (match->type == type &&
1505 match->prot_hook.type == po->prot_hook.type &&
1506 match->prot_hook.dev == po->prot_hook.dev) {
1507 err = -ENOSPC;
1508 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1509 __dev_remove_pack(&po->prot_hook);
1510 po->fanout = match;
1511 atomic_inc(&match->sk_ref);
1512 __fanout_link(sk, po);
1513 err = 0;
1514 }
1515 }
1516 out:
1517 mutex_unlock(&fanout_mutex);
1518 return err;
1519 }
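/* User-space view of the type_flags word parsed above (illustrative sketch):
 * the PACKET_FANOUT option value carries the group id in its low 16 bits and
 * the mode plus PACKET_FANOUT_FLAG_* bits in its high 16 bits, e.g.:
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Sockets in the same netns joining with the same id share one struct
 * packet_fanout (up to PACKET_FANOUT_MAX members), provided the mode,
 * protocol and bound device also match.
 */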
1520
1521 static void fanout_release(struct sock *sk)
1522 {
1523 struct packet_sock *po = pkt_sk(sk);
1524 struct packet_fanout *f;
1525
1526 f = po->fanout;
1527 if (!f)
1528 return;
1529
1530 mutex_lock(&fanout_mutex);
1531 po->fanout = NULL;
1532
1533 if (atomic_dec_and_test(&f->sk_ref)) {
1534 list_del(&f->list);
1535 dev_remove_pack(&f->prot_hook);
1536 kfree(f);
1537 }
1538 mutex_unlock(&fanout_mutex);
1539 }
1540
1541 static const struct proto_ops packet_ops;
1542
1543 static const struct proto_ops packet_ops_spkt;
1544
1545 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1546 struct packet_type *pt, struct net_device *orig_dev)
1547 {
1548 struct sock *sk;
1549 struct sockaddr_pkt *spkt;
1550
1551 /*
1552 * When we registered the protocol we saved the socket in the data
1553 * field for just this event.
1554 */
1555
1556 sk = pt->af_packet_priv;
1557
1558 /*
1559 * Yank back the headers [hope the device set this
1560 * right or kerboom...]
1561 *
1562 * Incoming packets have ll header pulled,
1563 * push it back.
1564 *
1565 * For outgoing ones skb->data == skb_mac_header(skb)
1566 * so that this procedure is noop.
1567 */
1568
1569 if (skb->pkt_type == PACKET_LOOPBACK)
1570 goto out;
1571
1572 if (!net_eq(dev_net(dev), sock_net(sk)))
1573 goto out;
1574
1575 skb = skb_share_check(skb, GFP_ATOMIC);
1576 if (skb == NULL)
1577 goto oom;
1578
1579 /* drop any routing info */
1580 skb_dst_drop(skb);
1581
1582 /* drop conntrack reference */
1583 nf_reset(skb);
1584
1585 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1586
1587 skb_push(skb, skb->data - skb_mac_header(skb));
1588
1589 /*
1590 * The SOCK_PACKET socket receives _all_ frames.
1591 */
1592
1593 spkt->spkt_family = dev->type;
1594 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1595 spkt->spkt_protocol = skb->protocol;
1596
1597 /*
1598 * Charge the memory to the socket. This is done specifically
1599 * to prevent sockets using all the memory up.
1600 */
1601
1602 if (sock_queue_rcv_skb(sk, skb) == 0)
1603 return 0;
1604
1605 out:
1606 kfree_skb(skb);
1607 oom:
1608 return 0;
1609 }
1610
1611
1612 /*
1613 * Output a raw packet to a device layer. This bypasses all the other
1614 * protocol layers and you must therefore supply it with a complete frame
1615 */
1616
1617 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1618 size_t len)
1619 {
1620 struct sock *sk = sock->sk;
1621 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1622 struct sk_buff *skb = NULL;
1623 struct net_device *dev;
1624 __be16 proto = 0;
1625 int err;
1626 int extra_len = 0;
1627
1628 /*
1629 * Get and verify the address.
1630 */
1631
1632 if (saddr) {
1633 if (msg->msg_namelen < sizeof(struct sockaddr))
1634 return -EINVAL;
1635 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1636 proto = saddr->spkt_protocol;
1637 } else
1638 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1639
1640 /*
1641 * Find the device first to size check it
1642 */
1643
1644 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1645 retry:
1646 rcu_read_lock();
1647 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1648 err = -ENODEV;
1649 if (dev == NULL)
1650 goto out_unlock;
1651
1652 err = -ENETDOWN;
1653 if (!(dev->flags & IFF_UP))
1654 goto out_unlock;
1655
1656 /*
1657 * You may not queue a frame bigger than the mtu. This is the lowest level
1658 * raw protocol and you must do your own fragmentation at this level.
1659 */
1660
1661 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1662 if (!netif_supports_nofcs(dev)) {
1663 err = -EPROTONOSUPPORT;
1664 goto out_unlock;
1665 }
1666 extra_len = 4; /* We're doing our own CRC */
1667 }
1668
1669 err = -EMSGSIZE;
1670 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1671 goto out_unlock;
1672
1673 if (!skb) {
1674 size_t reserved = LL_RESERVED_SPACE(dev);
1675 int tlen = dev->needed_tailroom;
1676 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1677
1678 rcu_read_unlock();
1679 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1680 if (skb == NULL)
1681 return -ENOBUFS;
1682 /* FIXME: Save some space for broken drivers that write a hard
1683 * header at transmission time by themselves. PPP is the notable
1684 * one here. This should really be fixed at the driver level.
1685 */
1686 skb_reserve(skb, reserved);
1687 skb_reset_network_header(skb);
1688
1689 /* Try to align data part correctly */
1690 if (hhlen) {
1691 skb->data -= hhlen;
1692 skb->tail -= hhlen;
1693 if (len < hhlen)
1694 skb_reset_network_header(skb);
1695 }
1696 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1697 if (err)
1698 goto out_free;
1699 goto retry;
1700 }
1701
1702 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1703 /* Earlier code assumed this would be a VLAN pkt,
1704 * double-check this now that we have the actual
1705 * packet in hand.
1706 */
1707 struct ethhdr *ehdr;
1708 skb_reset_mac_header(skb);
1709 ehdr = eth_hdr(skb);
1710 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1711 err = -EMSGSIZE;
1712 goto out_unlock;
1713 }
1714 }
1715
1716 skb->protocol = proto;
1717 skb->dev = dev;
1718 skb->priority = sk->sk_priority;
1719 skb->mark = sk->sk_mark;
1720
1721 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1722
1723 if (unlikely(extra_len == 4))
1724 skb->no_fcs = 1;
1725
1726 skb_probe_transport_header(skb, 0);
1727
1728 dev_queue_xmit(skb);
1729 rcu_read_unlock();
1730 return len;
1731
1732 out_unlock:
1733 rcu_read_unlock();
1734 out_free:
1735 kfree_skb(skb);
1736 return err;
1737 }
1738
1739 static unsigned int run_filter(const struct sk_buff *skb,
1740 const struct sock *sk,
1741 unsigned int res)
1742 {
1743 struct sk_filter *filter;
1744
1745 rcu_read_lock();
1746 filter = rcu_dereference(sk->sk_filter);
1747 if (filter != NULL)
1748 res = SK_RUN_FILTER(filter, skb);
1749 rcu_read_unlock();
1750
1751 return res;
1752 }
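/* The filter consulted above is the classic BPF program attached from user
 * space with SO_ATTACH_FILTER (illustrative sketch; this one-instruction
 * program accepts at most 96 bytes of every packet):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// accept, truncate to 96 bytes
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A return value of 0 drops the packet; a non-zero result becomes the new
 * snaplen in packet_rcv()/tpacket_rcv().
 */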
1753
1754 /*
1755 * This function does lazy skb cloning in the hope that most packets
1756 * are discarded by BPF.
1757 *
1758 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1759 * and skb->cb are mangled. It works because (and until) packets
1760 * falling here are owned by current CPU. Output packets are cloned
1761 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1762 * sequentially, so that if we return the skb to its original state on exit,
1763 * we will not harm anyone.
1764 */
1765
1766 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1767 struct packet_type *pt, struct net_device *orig_dev)
1768 {
1769 struct sock *sk;
1770 struct sockaddr_ll *sll;
1771 struct packet_sock *po;
1772 u8 *skb_head = skb->data;
1773 int skb_len = skb->len;
1774 unsigned int snaplen, res;
1775
1776 if (skb->pkt_type == PACKET_LOOPBACK)
1777 goto drop;
1778
1779 sk = pt->af_packet_priv;
1780 po = pkt_sk(sk);
1781
1782 if (!net_eq(dev_net(dev), sock_net(sk)))
1783 goto drop;
1784
1785 skb->dev = dev;
1786
1787 if (dev->header_ops) {
1788 /* The device has an explicit notion of ll header,
1789 * exported to higher levels.
1790 *
1791 * Otherwise, the device hides details of its frame
1792 * structure, so that the corresponding packet head is
1793 * never delivered to the user.
1794 */
1795 if (sk->sk_type != SOCK_DGRAM)
1796 skb_push(skb, skb->data - skb_mac_header(skb));
1797 else if (skb->pkt_type == PACKET_OUTGOING) {
1798 /* Special case: outgoing packets have ll header at head */
1799 skb_pull(skb, skb_network_offset(skb));
1800 }
1801 }
1802
1803 snaplen = skb->len;
1804
1805 res = run_filter(skb, sk, snaplen);
1806 if (!res)
1807 goto drop_n_restore;
1808 if (snaplen > res)
1809 snaplen = res;
1810
1811 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1812 goto drop_n_acct;
1813
1814 if (skb_shared(skb)) {
1815 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1816 if (nskb == NULL)
1817 goto drop_n_acct;
1818
1819 if (skb_head != skb->data) {
1820 skb->data = skb_head;
1821 skb->len = skb_len;
1822 }
1823 consume_skb(skb);
1824 skb = nskb;
1825 }
1826
1827 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
1828
1829 sll = &PACKET_SKB_CB(skb)->sa.ll;
1830 sll->sll_hatype = dev->type;
1831 sll->sll_pkttype = skb->pkt_type;
1832 if (unlikely(po->origdev))
1833 sll->sll_ifindex = orig_dev->ifindex;
1834 else
1835 sll->sll_ifindex = dev->ifindex;
1836
1837 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1838
1839 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1840 * Use their space for storing the original skb length.
1841 */
1842 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
1843
1844 if (pskb_trim(skb, snaplen))
1845 goto drop_n_acct;
1846
1847 skb_set_owner_r(skb, sk);
1848 skb->dev = NULL;
1849 skb_dst_drop(skb);
1850
1851 /* drop conntrack reference */
1852 nf_reset(skb);
1853
1854 spin_lock(&sk->sk_receive_queue.lock);
1855 po->stats.stats1.tp_packets++;
1856 sock_skb_set_dropcount(sk, skb);
1857 __skb_queue_tail(&sk->sk_receive_queue, skb);
1858 spin_unlock(&sk->sk_receive_queue.lock);
1859 sk->sk_data_ready(sk);
1860 return 0;
1861
1862 drop_n_acct:
1863 spin_lock(&sk->sk_receive_queue.lock);
1864 po->stats.stats1.tp_drops++;
1865 atomic_inc(&sk->sk_drops);
1866 spin_unlock(&sk->sk_receive_queue.lock);
1867
1868 drop_n_restore:
1869 if (skb_head != skb->data && skb_shared(skb)) {
1870 skb->data = skb_head;
1871 skb->len = skb_len;
1872 }
1873 drop:
1874 consume_skb(skb);
1875 return 0;
1876 }
1877
1878 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1879 struct packet_type *pt, struct net_device *orig_dev)
1880 {
1881 struct sock *sk;
1882 struct packet_sock *po;
1883 struct sockaddr_ll *sll;
1884 union tpacket_uhdr h;
1885 u8 *skb_head = skb->data;
1886 int skb_len = skb->len;
1887 unsigned int snaplen, res;
1888 unsigned long status = TP_STATUS_USER;
1889 unsigned short macoff, netoff, hdrlen;
1890 struct sk_buff *copy_skb = NULL;
1891 struct timespec ts;
1892 __u32 ts_status;
1893
1894 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1895 * We may add members to them up to the current aligned size without forcing
1896 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1897 */
1898 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1899 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1900
1901 if (skb->pkt_type == PACKET_LOOPBACK)
1902 goto drop;
1903
1904 sk = pt->af_packet_priv;
1905 po = pkt_sk(sk);
1906
1907 if (!net_eq(dev_net(dev), sock_net(sk)))
1908 goto drop;
1909
1910 if (dev->header_ops) {
1911 if (sk->sk_type != SOCK_DGRAM)
1912 skb_push(skb, skb->data - skb_mac_header(skb));
1913 else if (skb->pkt_type == PACKET_OUTGOING) {
1914 /* Special case: outgoing packets have ll header at head */
1915 skb_pull(skb, skb_network_offset(skb));
1916 }
1917 }
1918
1919 snaplen = skb->len;
1920
1921 res = run_filter(skb, sk, snaplen);
1922 if (!res)
1923 goto drop_n_restore;
1924
1925 if (skb->ip_summed == CHECKSUM_PARTIAL)
1926 status |= TP_STATUS_CSUMNOTREADY;
1927 else if (skb->pkt_type != PACKET_OUTGOING &&
1928 (skb->ip_summed == CHECKSUM_COMPLETE ||
1929 skb_csum_unnecessary(skb)))
1930 status |= TP_STATUS_CSUM_VALID;
1931
1932 if (snaplen > res)
1933 snaplen = res;
1934
1935 if (sk->sk_type == SOCK_DGRAM) {
1936 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1937 po->tp_reserve;
1938 } else {
1939 unsigned int maclen = skb_network_offset(skb);
1940 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1941 (maclen < 16 ? 16 : maclen)) +
1942 po->tp_reserve;
1943 macoff = netoff - maclen;
1944 }
1945 if (po->tp_version <= TPACKET_V2) {
1946 if (macoff + snaplen > po->rx_ring.frame_size) {
1947 if (po->copy_thresh &&
1948 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1949 if (skb_shared(skb)) {
1950 copy_skb = skb_clone(skb, GFP_ATOMIC);
1951 } else {
1952 copy_skb = skb_get(skb);
1953 skb_head = skb->data;
1954 }
1955 if (copy_skb)
1956 skb_set_owner_r(copy_skb, sk);
1957 }
1958 snaplen = po->rx_ring.frame_size - macoff;
1959 if ((int)snaplen < 0)
1960 snaplen = 0;
1961 }
1962 } else if (unlikely(macoff + snaplen >
1963 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1964 u32 nval;
1965
1966 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1967 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1968 snaplen, nval, macoff);
1969 snaplen = nval;
1970 if (unlikely((int)snaplen < 0)) {
1971 snaplen = 0;
1972 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1973 }
1974 }
1975 spin_lock(&sk->sk_receive_queue.lock);
1976 h.raw = packet_current_rx_frame(po, skb,
1977 TP_STATUS_KERNEL, (macoff+snaplen));
1978 if (!h.raw)
1979 goto ring_is_full;
1980 if (po->tp_version <= TPACKET_V2) {
1981 packet_increment_rx_head(po, &po->rx_ring);
1982 /*
1983 * LOSING will be reported till you read the stats,
1984 * because it's COR - Clear On Read.
1985 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1986 * at the packet level.
1987 */
1988 if (po->stats.stats1.tp_drops)
1989 status |= TP_STATUS_LOSING;
1990 }
1991 po->stats.stats1.tp_packets++;
1992 if (copy_skb) {
1993 status |= TP_STATUS_COPY;
1994 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1995 }
1996 spin_unlock(&sk->sk_receive_queue.lock);
1997
1998 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1999
2000 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2001 getnstimeofday(&ts);
2002
2003 status |= ts_status;
2004
2005 switch (po->tp_version) {
2006 case TPACKET_V1:
2007 h.h1->tp_len = skb->len;
2008 h.h1->tp_snaplen = snaplen;
2009 h.h1->tp_mac = macoff;
2010 h.h1->tp_net = netoff;
2011 h.h1->tp_sec = ts.tv_sec;
2012 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2013 hdrlen = sizeof(*h.h1);
2014 break;
2015 case TPACKET_V2:
2016 h.h2->tp_len = skb->len;
2017 h.h2->tp_snaplen = snaplen;
2018 h.h2->tp_mac = macoff;
2019 h.h2->tp_net = netoff;
2020 h.h2->tp_sec = ts.tv_sec;
2021 h.h2->tp_nsec = ts.tv_nsec;
2022 if (skb_vlan_tag_present(skb)) {
2023 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2024 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2025 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2026 } else {
2027 h.h2->tp_vlan_tci = 0;
2028 h.h2->tp_vlan_tpid = 0;
2029 }
2030 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2031 hdrlen = sizeof(*h.h2);
2032 break;
2033 case TPACKET_V3:
2034 /* tp_next_offset and the VLAN fields are already populated above,
2035 * so don't clear them here.
2036 */
2037 h.h3->tp_status |= status;
2038 h.h3->tp_len = skb->len;
2039 h.h3->tp_snaplen = snaplen;
2040 h.h3->tp_mac = macoff;
2041 h.h3->tp_net = netoff;
2042 h.h3->tp_sec = ts.tv_sec;
2043 h.h3->tp_nsec = ts.tv_nsec;
2044 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2045 hdrlen = sizeof(*h.h3);
2046 break;
2047 default:
2048 BUG();
2049 }
2050
2051 sll = h.raw + TPACKET_ALIGN(hdrlen);
2052 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2053 sll->sll_family = AF_PACKET;
2054 sll->sll_hatype = dev->type;
2055 sll->sll_protocol = skb->protocol;
2056 sll->sll_pkttype = skb->pkt_type;
2057 if (unlikely(po->origdev))
2058 sll->sll_ifindex = orig_dev->ifindex;
2059 else
2060 sll->sll_ifindex = dev->ifindex;
2061
2062 smp_mb();
2063
2064 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2065 if (po->tp_version <= TPACKET_V2) {
2066 u8 *start, *end;
2067
2068 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2069 macoff + snaplen);
2070
2071 for (start = h.raw; start < end; start += PAGE_SIZE)
2072 flush_dcache_page(pgv_to_page(start));
2073 }
2074 smp_wmb();
2075 #endif
2076
2077 if (po->tp_version <= TPACKET_V2) {
2078 __packet_set_status(po, h.raw, status);
2079 sk->sk_data_ready(sk);
2080 } else {
2081 prb_clear_blk_fill_status(&po->rx_ring);
2082 }
2083
2084 drop_n_restore:
2085 if (skb_head != skb->data && skb_shared(skb)) {
2086 skb->data = skb_head;
2087 skb->len = skb_len;
2088 }
2089 drop:
2090 kfree_skb(skb);
2091 return 0;
2092
2093 ring_is_full:
2094 po->stats.stats1.tp_drops++;
2095 spin_unlock(&sk->sk_receive_queue.lock);
2096
2097 sk->sk_data_ready(sk);
2098 kfree_skb(copy_skb);
2099 goto drop_n_restore;
2100 }
2101
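/*
 * Illustrative userspace sketch (not part of the original file; "fd",
 * "ring", "req" and process_packet() are assumptions): a minimal
 * TPACKET_V2 RX ring consumer matching tpacket_rcv() above. The kernel
 * hands a frame over by setting TP_STATUS_USER in tp_status; userspace
 * returns it by writing TP_STATUS_KERNEL back. Compiled out with #if 0
 * so it never builds here.
 */
#if 0
#include <linux/if_packet.h>
#include <poll.h>

extern void process_packet(const void *data, unsigned int len); /* hypothetical consumer */

static void rx_ring_walk(int fd, char *ring, const struct tpacket_req *req)
{
	unsigned int i = 0;

	for (;;) {
		struct tpacket2_hdr *hdr = (void *)(ring + i * req->tp_frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			poll(&pfd, 1, -1);	/* wait for the kernel to fill this frame */
			continue;
		}

		/* packet data starts tp_mac bytes into the frame */
		process_packet((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);

		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the frame back */
		__sync_synchronize();
		i = (i + 1) % req->tp_frame_nr;
	}
}
#endif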
2102 static void tpacket_destruct_skb(struct sk_buff *skb)
2103 {
2104 struct packet_sock *po = pkt_sk(skb->sk);
2105
2106 if (likely(po->tx_ring.pg_vec)) {
2107 void *ph;
2108 __u32 ts;
2109
2110 ph = skb_shinfo(skb)->destructor_arg;
2111 packet_dec_pending(&po->tx_ring);
2112
2113 ts = __packet_set_timestamp(po, ph, skb);
2114 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2115 }
2116
2117 sock_wfree(skb);
2118 }
2119
2120 static bool ll_header_truncated(const struct net_device *dev, int len)
2121 {
2122 /* net device doesn't like empty head */
2123 if (unlikely(len <= dev->hard_header_len)) {
2124 net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
2125 current->comm, len, dev->hard_header_len);
2126 return true;
2127 }
2128
2129 return false;
2130 }
2131
2132 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2133 void *frame, struct net_device *dev, int size_max,
2134 __be16 proto, unsigned char *addr, int hlen)
2135 {
2136 union tpacket_uhdr ph;
2137 int to_write, offset, len, tp_len, nr_frags, len_max;
2138 struct socket *sock = po->sk.sk_socket;
2139 struct page *page;
2140 void *data;
2141 int err;
2142
2143 ph.raw = frame;
2144
2145 skb->protocol = proto;
2146 skb->dev = dev;
2147 skb->priority = po->sk.sk_priority;
2148 skb->mark = po->sk.sk_mark;
2149 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2150 skb_shinfo(skb)->destructor_arg = ph.raw;
2151
2152 switch (po->tp_version) {
2153 case TPACKET_V2:
2154 tp_len = ph.h2->tp_len;
2155 break;
2156 default:
2157 tp_len = ph.h1->tp_len;
2158 break;
2159 }
2160 if (unlikely(tp_len > size_max)) {
2161 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2162 return -EMSGSIZE;
2163 }
2164
2165 skb_reserve(skb, hlen);
2166 skb_reset_network_header(skb);
2167
2168 if (!packet_use_direct_xmit(po))
2169 skb_probe_transport_header(skb, 0);
2170 if (unlikely(po->tp_tx_has_off)) {
2171 int off_min, off_max, off;
2172 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2173 off_max = po->tx_ring.frame_size - tp_len;
2174 if (sock->type == SOCK_DGRAM) {
2175 switch (po->tp_version) {
2176 case TPACKET_V2:
2177 off = ph.h2->tp_net;
2178 break;
2179 default:
2180 off = ph.h1->tp_net;
2181 break;
2182 }
2183 } else {
2184 switch (po->tp_version) {
2185 case TPACKET_V2:
2186 off = ph.h2->tp_mac;
2187 break;
2188 default:
2189 off = ph.h1->tp_mac;
2190 break;
2191 }
2192 }
2193 if (unlikely((off < off_min) || (off_max < off)))
2194 return -EINVAL;
2195 data = ph.raw + off;
2196 } else {
2197 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2198 }
2199 to_write = tp_len;
2200
2201 if (sock->type == SOCK_DGRAM) {
2202 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2203 NULL, tp_len);
2204 if (unlikely(err < 0))
2205 return -EINVAL;
2206 } else if (dev->hard_header_len) {
2207 if (ll_header_truncated(dev, tp_len))
2208 return -EINVAL;
2209
2210 skb_push(skb, dev->hard_header_len);
2211 err = skb_store_bits(skb, 0, data,
2212 dev->hard_header_len);
2213 if (unlikely(err))
2214 return err;
2215
2216 data += dev->hard_header_len;
2217 to_write -= dev->hard_header_len;
2218 }
2219
2220 offset = offset_in_page(data);
2221 len_max = PAGE_SIZE - offset;
2222 len = ((to_write > len_max) ? len_max : to_write);
2223
2224 skb->data_len = to_write;
2225 skb->len += to_write;
2226 skb->truesize += to_write;
2227 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2228
2229 while (likely(to_write)) {
2230 nr_frags = skb_shinfo(skb)->nr_frags;
2231
2232 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2233 pr_err("Packet exceeds the number of skb frags (%lu)\n",
2234 MAX_SKB_FRAGS);
2235 return -EFAULT;
2236 }
2237
2238 page = pgv_to_page(data);
2239 data += len;
2240 flush_dcache_page(page);
2241 get_page(page);
2242 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2243 to_write -= len;
2244 offset = 0;
2245 len_max = PAGE_SIZE;
2246 len = ((to_write > len_max) ? len_max : to_write);
2247 }
2248
2249 return tp_len;
2250 }
2251
2252 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2253 {
2254 struct sk_buff *skb;
2255 struct net_device *dev;
2256 __be16 proto;
2257 int err, reserve = 0;
2258 void *ph;
2259 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2260 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2261 int tp_len, size_max;
2262 unsigned char *addr;
2263 int len_sum = 0;
2264 int status = TP_STATUS_AVAILABLE;
2265 int hlen, tlen;
2266
2267 mutex_lock(&po->pg_vec_lock);
2268
2269 if (likely(saddr == NULL)) {
2270 dev = packet_cached_dev_get(po);
2271 proto = po->num;
2272 addr = NULL;
2273 } else {
2274 err = -EINVAL;
2275 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2276 goto out;
2277 if (msg->msg_namelen < (saddr->sll_halen
2278 + offsetof(struct sockaddr_ll,
2279 sll_addr)))
2280 goto out;
2281 proto = saddr->sll_protocol;
2282 addr = saddr->sll_addr;
2283 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2284 }
2285
2286 err = -ENXIO;
2287 if (unlikely(dev == NULL))
2288 goto out;
2289 err = -ENETDOWN;
2290 if (unlikely(!(dev->flags & IFF_UP)))
2291 goto out_put;
2292
2293 reserve = dev->hard_header_len + VLAN_HLEN;
2294 size_max = po->tx_ring.frame_size
2295 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2296
2297 if (size_max > dev->mtu + reserve)
2298 size_max = dev->mtu + reserve;
2299
2300 do {
2301 ph = packet_current_frame(po, &po->tx_ring,
2302 TP_STATUS_SEND_REQUEST);
2303 if (unlikely(ph == NULL)) {
2304 if (need_wait && need_resched())
2305 schedule();
2306 continue;
2307 }
2308
2309 status = TP_STATUS_SEND_REQUEST;
2310 hlen = LL_RESERVED_SPACE(dev);
2311 tlen = dev->needed_tailroom;
2312 skb = sock_alloc_send_skb(&po->sk,
2313 hlen + tlen + sizeof(struct sockaddr_ll),
2314 0, &err);
2315
2316 if (unlikely(skb == NULL))
2317 goto out_status;
2318
2319 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2320 addr, hlen);
2321 if (tp_len > dev->mtu + dev->hard_header_len) {
2322 struct ethhdr *ehdr;
2323 /* Earlier code assumed this would be a VLAN pkt,
2324 * double-check this now that we have the actual
2325 * packet in hand.
2326 */
2327
2328 skb_reset_mac_header(skb);
2329 ehdr = eth_hdr(skb);
2330 if (ehdr->h_proto != htons(ETH_P_8021Q))
2331 tp_len = -EMSGSIZE;
2332 }
2333 if (unlikely(tp_len < 0)) {
2334 if (po->tp_loss) {
2335 __packet_set_status(po, ph,
2336 TP_STATUS_AVAILABLE);
2337 packet_increment_head(&po->tx_ring);
2338 kfree_skb(skb);
2339 continue;
2340 } else {
2341 status = TP_STATUS_WRONG_FORMAT;
2342 err = tp_len;
2343 goto out_status;
2344 }
2345 }
2346
2347 packet_pick_tx_queue(dev, skb);
2348
2349 skb->destructor = tpacket_destruct_skb;
2350 __packet_set_status(po, ph, TP_STATUS_SENDING);
2351 packet_inc_pending(&po->tx_ring);
2352
2353 status = TP_STATUS_SEND_REQUEST;
2354 err = po->xmit(skb);
2355 if (unlikely(err > 0)) {
2356 err = net_xmit_errno(err);
2357 if (err && __packet_get_status(po, ph) ==
2358 TP_STATUS_AVAILABLE) {
2359 /* skb was destructed already */
2360 skb = NULL;
2361 goto out_status;
2362 }
2363 /*
2364 * skb was dropped but not destructed yet;
2365 * let's treat it like congestion or err < 0
2366 */
2367 err = 0;
2368 }
2369 packet_increment_head(&po->tx_ring);
2370 len_sum += tp_len;
2371 } while (likely((ph != NULL) ||
2372 /* Note: packet_read_pending() might be slow if we have
2373 * to call it, as it's a per-cpu variable, but in the fast
2374 * path we already short-circuit the loop with the first
2375 * condition, and luckily don't have to take that path
2376 * anyway.
2377 */
2378 (need_wait && packet_read_pending(&po->tx_ring))));
2379
2380 err = len_sum;
2381 goto out_put;
2382
2383 out_status:
2384 __packet_set_status(po, ph, status);
2385 kfree_skb(skb);
2386 out_put:
2387 dev_put(dev);
2388 out:
2389 mutex_unlock(&po->pg_vec_lock);
2390 return err;
2391 }
2392
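/*
 * Illustrative userspace sketch (not part of the original file; "fd",
 * "ring" and "req" are assumed to describe an already mmap()ed
 * PACKET_TX_RING using TPACKET_V2): the producer side that tpacket_snd()
 * above consumes. Userspace copies the frame after the aligned header,
 * marks the slot TP_STATUS_SEND_REQUEST and kicks the kernel with a plain
 * send(); the slot comes back as TP_STATUS_AVAILABLE, or
 * TP_STATUS_WRONG_FORMAT on error. Compiled out with #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int tx_one_frame(int fd, char *ring, const struct tpacket_req *req,
			unsigned int idx, const void *pkt, unsigned int len)
{
	struct tpacket2_hdr *hdr = (void *)(ring + idx * req->tp_frame_size);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;	/* slot still owned by the kernel */

	/* default data offset when PACKET_TX_HAS_OFF is not used */
	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
	       pkt, len);
	hdr->tp_len = len;
	__sync_synchronize();
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* flush the ring; MSG_DONTWAIT keeps this non-blocking */
	return send(fd, NULL, 0, MSG_DONTWAIT);
}
#endif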
2393 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2394 size_t reserve, size_t len,
2395 size_t linear, int noblock,
2396 int *err)
2397 {
2398 struct sk_buff *skb;
2399
2400 /* Under a page? Don't bother with paged skb. */
2401 if (prepad + len < PAGE_SIZE || !linear)
2402 linear = len;
2403
2404 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2405 err, 0);
2406 if (!skb)
2407 return NULL;
2408
2409 skb_reserve(skb, reserve);
2410 skb_put(skb, linear);
2411 skb->data_len = len - linear;
2412 skb->len += len - linear;
2413
2414 return skb;
2415 }
2416
2417 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2418 {
2419 struct sock *sk = sock->sk;
2420 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2421 struct sk_buff *skb;
2422 struct net_device *dev;
2423 __be16 proto;
2424 unsigned char *addr;
2425 int err, reserve = 0;
2426 struct virtio_net_hdr vnet_hdr = { 0 };
2427 int offset = 0;
2428 int vnet_hdr_len;
2429 struct packet_sock *po = pkt_sk(sk);
2430 unsigned short gso_type = 0;
2431 int hlen, tlen;
2432 int extra_len = 0;
2433 ssize_t n;
2434
2435 /*
2436 * Get and verify the address.
2437 */
2438
2439 if (likely(saddr == NULL)) {
2440 dev = packet_cached_dev_get(po);
2441 proto = po->num;
2442 addr = NULL;
2443 } else {
2444 err = -EINVAL;
2445 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2446 goto out;
2447 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2448 goto out;
2449 proto = saddr->sll_protocol;
2450 addr = saddr->sll_addr;
2451 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2452 }
2453
2454 err = -ENXIO;
2455 if (unlikely(dev == NULL))
2456 goto out_unlock;
2457 err = -ENETDOWN;
2458 if (unlikely(!(dev->flags & IFF_UP)))
2459 goto out_unlock;
2460
2461 if (sock->type == SOCK_RAW)
2462 reserve = dev->hard_header_len;
2463 if (po->has_vnet_hdr) {
2464 vnet_hdr_len = sizeof(vnet_hdr);
2465
2466 err = -EINVAL;
2467 if (len < vnet_hdr_len)
2468 goto out_unlock;
2469
2470 len -= vnet_hdr_len;
2471
2472 err = -EFAULT;
2473 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2474 if (n != vnet_hdr_len)
2475 goto out_unlock;
2476
2477 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2478 (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2479 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2480 __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2481 vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2482 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2483 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
2484
2485 err = -EINVAL;
2486 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
2487 goto out_unlock;
2488
2489 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2490 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2491 case VIRTIO_NET_HDR_GSO_TCPV4:
2492 gso_type = SKB_GSO_TCPV4;
2493 break;
2494 case VIRTIO_NET_HDR_GSO_TCPV6:
2495 gso_type = SKB_GSO_TCPV6;
2496 break;
2497 case VIRTIO_NET_HDR_GSO_UDP:
2498 gso_type = SKB_GSO_UDP;
2499 break;
2500 default:
2501 goto out_unlock;
2502 }
2503
2504 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2505 gso_type |= SKB_GSO_TCP_ECN;
2506
2507 if (vnet_hdr.gso_size == 0)
2508 goto out_unlock;
2509
2510 }
2511 }
2512
2513 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2514 if (!netif_supports_nofcs(dev)) {
2515 err = -EPROTONOSUPPORT;
2516 goto out_unlock;
2517 }
2518 extra_len = 4; /* We're doing our own CRC */
2519 }
2520
2521 err = -EMSGSIZE;
2522 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2523 goto out_unlock;
2524
2525 err = -ENOBUFS;
2526 hlen = LL_RESERVED_SPACE(dev);
2527 tlen = dev->needed_tailroom;
2528 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2529 __virtio16_to_cpu(false, vnet_hdr.hdr_len),
2530 msg->msg_flags & MSG_DONTWAIT, &err);
2531 if (skb == NULL)
2532 goto out_unlock;
2533
2534 skb_set_network_header(skb, reserve);
2535
2536 err = -EINVAL;
2537 if (sock->type == SOCK_DGRAM) {
2538 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2539 if (unlikely(offset < 0))
2540 goto out_free;
2541 } else {
2542 if (ll_header_truncated(dev, len))
2543 goto out_free;
2544 }
2545
2546 /* Returns -EFAULT on error */
2547 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2548 if (err)
2549 goto out_free;
2550
2551 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2552
2553 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2554 /* Earlier code assumed this would be a VLAN pkt,
2555 * double-check this now that we have the actual
2556 * packet in hand.
2557 */
2558 struct ethhdr *ehdr;
2559 skb_reset_mac_header(skb);
2560 ehdr = eth_hdr(skb);
2561 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2562 err = -EMSGSIZE;
2563 goto out_free;
2564 }
2565 }
2566
2567 skb->protocol = proto;
2568 skb->dev = dev;
2569 skb->priority = sk->sk_priority;
2570 skb->mark = sk->sk_mark;
2571
2572 packet_pick_tx_queue(dev, skb);
2573
2574 if (po->has_vnet_hdr) {
2575 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2576 u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2577 u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2578 if (!skb_partial_csum_set(skb, s, o)) {
2579 err = -EINVAL;
2580 goto out_free;
2581 }
2582 }
2583
2584 skb_shinfo(skb)->gso_size =
2585 __virtio16_to_cpu(false, vnet_hdr.gso_size);
2586 skb_shinfo(skb)->gso_type = gso_type;
2587
2588 /* Header must be checked, and gso_segs computed. */
2589 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2590 skb_shinfo(skb)->gso_segs = 0;
2591
2592 len += vnet_hdr_len;
2593 }
2594
2595 if (!packet_use_direct_xmit(po))
2596 skb_probe_transport_header(skb, reserve);
2597 if (unlikely(extra_len == 4))
2598 skb->no_fcs = 1;
2599
2600 err = po->xmit(skb);
2601 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2602 goto out_unlock;
2603
2604 dev_put(dev);
2605
2606 return len;
2607
2608 out_free:
2609 kfree_skb(skb);
2610 out_unlock:
2611 if (dev)
2612 dev_put(dev);
2613 out:
2614 return err;
2615 }
2616
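/*
 * Illustrative userspace sketch (not part of the original file; "frame" is
 * an assumed, fully built Ethernet frame and ETH_P_IP is an assumed
 * ethertype): the non-ring transmit path serviced by packet_snd() above.
 * On a SOCK_RAW packet socket the frame is sent verbatim; sll_ifindex
 * selects the egress device. Compiled out with #if 0.
 */
#if 0
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int send_raw_frame(int fd, const char *ifname,
			  const void *frame, size_t len)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_ifindex = if_nametoindex(ifname);

	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}
#endif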
2617 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2618 {
2619 struct sock *sk = sock->sk;
2620 struct packet_sock *po = pkt_sk(sk);
2621
2622 if (po->tx_ring.pg_vec)
2623 return tpacket_snd(po, msg);
2624 else
2625 return packet_snd(sock, msg, len);
2626 }
2627
2628 /*
2629 * Close a PACKET socket. This is fairly simple. We immediately go
2630 * to 'closed' state and remove our protocol entry in the device list.
2631 */
2632
2633 static int packet_release(struct socket *sock)
2634 {
2635 struct sock *sk = sock->sk;
2636 struct packet_sock *po;
2637 struct net *net;
2638 union tpacket_req_u req_u;
2639
2640 if (!sk)
2641 return 0;
2642
2643 net = sock_net(sk);
2644 po = pkt_sk(sk);
2645
2646 mutex_lock(&net->packet.sklist_lock);
2647 sk_del_node_init_rcu(sk);
2648 mutex_unlock(&net->packet.sklist_lock);
2649
2650 preempt_disable();
2651 sock_prot_inuse_add(net, sk->sk_prot, -1);
2652 preempt_enable();
2653
2654 spin_lock(&po->bind_lock);
2655 unregister_prot_hook(sk, false);
2656 packet_cached_dev_reset(po);
2657
2658 if (po->prot_hook.dev) {
2659 dev_put(po->prot_hook.dev);
2660 po->prot_hook.dev = NULL;
2661 }
2662 spin_unlock(&po->bind_lock);
2663
2664 packet_flush_mclist(sk);
2665
2666 if (po->rx_ring.pg_vec) {
2667 memset(&req_u, 0, sizeof(req_u));
2668 packet_set_ring(sk, &req_u, 1, 0);
2669 }
2670
2671 if (po->tx_ring.pg_vec) {
2672 memset(&req_u, 0, sizeof(req_u));
2673 packet_set_ring(sk, &req_u, 1, 1);
2674 }
2675
2676 fanout_release(sk);
2677
2678 synchronize_net();
2679 /*
2680 * Now the socket is dead. No more input will appear.
2681 */
2682 sock_orphan(sk);
2683 sock->sk = NULL;
2684
2685 /* Purge queues */
2686
2687 skb_queue_purge(&sk->sk_receive_queue);
2688 packet_free_pending(po);
2689 sk_refcnt_debug_release(sk);
2690
2691 sock_put(sk);
2692 return 0;
2693 }
2694
2695 /*
2696 * Attach a packet hook.
2697 */
2698
2699 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2700 {
2701 struct packet_sock *po = pkt_sk(sk);
2702 const struct net_device *dev_curr;
2703 __be16 proto_curr;
2704 bool need_rehook;
2705
2706 if (po->fanout) {
2707 if (dev)
2708 dev_put(dev);
2709
2710 return -EINVAL;
2711 }
2712
2713 lock_sock(sk);
2714 spin_lock(&po->bind_lock);
2715
2716 proto_curr = po->prot_hook.type;
2717 dev_curr = po->prot_hook.dev;
2718
2719 need_rehook = proto_curr != proto || dev_curr != dev;
2720
2721 if (need_rehook) {
2722 unregister_prot_hook(sk, true);
2723
2724 po->num = proto;
2725 po->prot_hook.type = proto;
2726
2727 if (po->prot_hook.dev)
2728 dev_put(po->prot_hook.dev);
2729
2730 po->prot_hook.dev = dev;
2731
2732 po->ifindex = dev ? dev->ifindex : 0;
2733 packet_cached_dev_assign(po, dev);
2734 }
2735
2736 if (proto == 0 || !need_rehook)
2737 goto out_unlock;
2738
2739 if (!dev || (dev->flags & IFF_UP)) {
2740 register_prot_hook(sk);
2741 } else {
2742 sk->sk_err = ENETDOWN;
2743 if (!sock_flag(sk, SOCK_DEAD))
2744 sk->sk_error_report(sk);
2745 }
2746
2747 out_unlock:
2748 spin_unlock(&po->bind_lock);
2749 release_sock(sk);
2750 return 0;
2751 }
2752
2753 /*
2754 * Bind a packet socket to a device
2755 */
2756
2757 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2758 int addr_len)
2759 {
2760 struct sock *sk = sock->sk;
2761 char name[15];
2762 struct net_device *dev;
2763 int err = -ENODEV;
2764
2765 /*
2766 * Check legality
2767 */
2768
2769 if (addr_len != sizeof(struct sockaddr))
2770 return -EINVAL;
2771 strlcpy(name, uaddr->sa_data, sizeof(name));
2772
2773 dev = dev_get_by_name(sock_net(sk), name);
2774 if (dev)
2775 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2776 return err;
2777 }
2778
2779 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2780 {
2781 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2782 struct sock *sk = sock->sk;
2783 struct net_device *dev = NULL;
2784 int err;
2785
2786
2787 /*
2788 * Check legality
2789 */
2790
2791 if (addr_len < sizeof(struct sockaddr_ll))
2792 return -EINVAL;
2793 if (sll->sll_family != AF_PACKET)
2794 return -EINVAL;
2795
2796 if (sll->sll_ifindex) {
2797 err = -ENODEV;
2798 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2799 if (dev == NULL)
2800 goto out;
2801 }
2802 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2803
2804 out:
2805 return err;
2806 }
2807
2808 static struct proto packet_proto = {
2809 .name = "PACKET",
2810 .owner = THIS_MODULE,
2811 .obj_size = sizeof(struct packet_sock),
2812 };
2813
2814 /*
2815 * Create a packet of type SOCK_PACKET.
2816 */
2817
2818 static int packet_create(struct net *net, struct socket *sock, int protocol,
2819 int kern)
2820 {
2821 struct sock *sk;
2822 struct packet_sock *po;
2823 __be16 proto = (__force __be16)protocol; /* weird, but documented */
2824 int err;
2825
2826 if (!ns_capable(net->user_ns, CAP_NET_RAW))
2827 return -EPERM;
2828 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2829 sock->type != SOCK_PACKET)
2830 return -ESOCKTNOSUPPORT;
2831
2832 sock->state = SS_UNCONNECTED;
2833
2834 err = -ENOBUFS;
2835 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2836 if (sk == NULL)
2837 goto out;
2838
2839 sock->ops = &packet_ops;
2840 if (sock->type == SOCK_PACKET)
2841 sock->ops = &packet_ops_spkt;
2842
2843 sock_init_data(sock, sk);
2844
2845 po = pkt_sk(sk);
2846 sk->sk_family = PF_PACKET;
2847 po->num = proto;
2848 po->xmit = dev_queue_xmit;
2849
2850 err = packet_alloc_pending(po);
2851 if (err)
2852 goto out2;
2853
2854 packet_cached_dev_reset(po);
2855
2856 sk->sk_destruct = packet_sock_destruct;
2857 sk_refcnt_debug_inc(sk);
2858
2859 /*
2860 * Attach a protocol block
2861 */
2862
2863 spin_lock_init(&po->bind_lock);
2864 mutex_init(&po->pg_vec_lock);
2865 po->prot_hook.func = packet_rcv;
2866
2867 if (sock->type == SOCK_PACKET)
2868 po->prot_hook.func = packet_rcv_spkt;
2869
2870 po->prot_hook.af_packet_priv = sk;
2871
2872 if (proto) {
2873 po->prot_hook.type = proto;
2874 register_prot_hook(sk);
2875 }
2876
2877 mutex_lock(&net->packet.sklist_lock);
2878 sk_add_node_rcu(sk, &net->packet.sklist);
2879 mutex_unlock(&net->packet.sklist_lock);
2880
2881 preempt_disable();
2882 sock_prot_inuse_add(net, &packet_proto, 1);
2883 preempt_enable();
2884
2885 return 0;
2886 out2:
2887 sk_free(sk);
2888 out:
2889 return err;
2890 }
2891
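/*
 * Illustrative userspace sketch (not part of the original file; the
 * interface name passed in is an assumption): creating a PF_PACKET socket
 * as packet_create() above expects and binding it to one interface via
 * packet_bind()/packet_do_bind(). CAP_NET_RAW is required and the
 * protocol is given in network byte order. Compiled out with #if 0.
 */
#if 0
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int open_packet_socket(const char *ifname)
{
	struct sockaddr_ll sll;
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0)
		return -1;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex(ifname);

	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
		return -1;

	return fd;
}
#endif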
2892 /*
2893 * Pull a packet from our receive queue and hand it to the user.
2894 * If necessary we block.
2895 */
2896
2897 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2898 int flags)
2899 {
2900 struct sock *sk = sock->sk;
2901 struct sk_buff *skb;
2902 int copied, err;
2903 int vnet_hdr_len = 0;
2904 unsigned int origlen = 0;
2905
2906 err = -EINVAL;
2907 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2908 goto out;
2909
2910 #if 0
2911 /* What error should we return now? EUNATTACH? */
2912 if (pkt_sk(sk)->ifindex < 0)
2913 return -ENODEV;
2914 #endif
2915
2916 if (flags & MSG_ERRQUEUE) {
2917 err = sock_recv_errqueue(sk, msg, len,
2918 SOL_PACKET, PACKET_TX_TIMESTAMP);
2919 goto out;
2920 }
2921
2922 /*
2923 * Call the generic datagram receiver. This handles all sorts
2924 * of horrible races and re-entrancy so we can forget about it
2925 * in the protocol layers.
2926 *
2927 * Now it will return ENETDOWN if the device has just gone down,
2928 * but then it will block.
2929 */
2930
2931 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2932
2933 /*
2934 * If an error occurred, return it. Because skb_recv_datagram()
2935 * handles the blocking for us, we don't need to see or worry
2936 * about blocking retries.
2937 */
2938
2939 if (skb == NULL)
2940 goto out;
2941
2942 if (pkt_sk(sk)->has_vnet_hdr) {
2943 struct virtio_net_hdr vnet_hdr = { 0 };
2944
2945 err = -EINVAL;
2946 vnet_hdr_len = sizeof(vnet_hdr);
2947 if (len < vnet_hdr_len)
2948 goto out_free;
2949
2950 len -= vnet_hdr_len;
2951
2952 if (skb_is_gso(skb)) {
2953 struct skb_shared_info *sinfo = skb_shinfo(skb);
2954
2955 /* This is a hint as to how much should be linear. */
2956 vnet_hdr.hdr_len =
2957 __cpu_to_virtio16(false, skb_headlen(skb));
2958 vnet_hdr.gso_size =
2959 __cpu_to_virtio16(false, sinfo->gso_size);
2960 if (sinfo->gso_type & SKB_GSO_TCPV4)
2961 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2962 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2963 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2964 else if (sinfo->gso_type & SKB_GSO_UDP)
2965 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2966 else if (sinfo->gso_type & SKB_GSO_FCOE)
2967 goto out_free;
2968 else
2969 BUG();
2970 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2971 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2972 } else
2973 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2974
2975 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2976 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2977 vnet_hdr.csum_start = __cpu_to_virtio16(false,
2978 skb_checksum_start_offset(skb));
2979 vnet_hdr.csum_offset = __cpu_to_virtio16(false,
2980 skb->csum_offset);
2981 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2982 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2983 } /* else everything is zero */
2984
2985 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
2986 if (err < 0)
2987 goto out_free;
2988 }
2989
2990 /* You lose any data beyond the buffer you gave. If that worries
2991 * a user program, it can ask the device for its MTU
2992 * anyway.
2993 */
2994 copied = skb->len;
2995 if (copied > len) {
2996 copied = len;
2997 msg->msg_flags |= MSG_TRUNC;
2998 }
2999
3000 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3001 if (err)
3002 goto out_free;
3003
3004 if (sock->type != SOCK_PACKET) {
3005 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3006
3007 /* Original length was stored in sockaddr_ll fields */
3008 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3009 sll->sll_family = AF_PACKET;
3010 sll->sll_protocol = skb->protocol;
3011 }
3012
3013 sock_recv_ts_and_drops(msg, sk, skb);
3014
3015 if (msg->msg_name) {
3016 /* If the address length field is there to be filled
3017 * in, we fill it in now.
3018 */
3019 if (sock->type == SOCK_PACKET) {
3020 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3021 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3022 } else {
3023 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3024
3025 msg->msg_namelen = sll->sll_halen +
3026 offsetof(struct sockaddr_ll, sll_addr);
3027 }
3028 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3029 msg->msg_namelen);
3030 }
3031
3032 if (pkt_sk(sk)->auxdata) {
3033 struct tpacket_auxdata aux;
3034
3035 aux.tp_status = TP_STATUS_USER;
3036 if (skb->ip_summed == CHECKSUM_PARTIAL)
3037 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3038 else if (skb->pkt_type != PACKET_OUTGOING &&
3039 (skb->ip_summed == CHECKSUM_COMPLETE ||
3040 skb_csum_unnecessary(skb)))
3041 aux.tp_status |= TP_STATUS_CSUM_VALID;
3042
3043 aux.tp_len = origlen;
3044 aux.tp_snaplen = skb->len;
3045 aux.tp_mac = 0;
3046 aux.tp_net = skb_network_offset(skb);
3047 if (skb_vlan_tag_present(skb)) {
3048 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3049 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3050 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3051 } else {
3052 aux.tp_vlan_tci = 0;
3053 aux.tp_vlan_tpid = 0;
3054 }
3055 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3056 }
3057
3058 /*
3059 * Free or return the buffer as appropriate. Again this
3060 * hides all the races and re-entrancy issues from us.
3061 */
3062 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3063
3064 out_free:
3065 skb_free_datagram(sk, skb);
3066 out:
3067 return err;
3068 }
3069
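/*
 * Illustrative userspace sketch (not part of the original file; assumes
 * PACKET_AUXDATA was already enabled with setsockopt() and that "buf" is
 * an adequately sized receive buffer): reading one packet through
 * packet_recvmsg() above together with the struct tpacket_auxdata control
 * message (VLAN tag, original length, checksum status). Compiled out with
 * #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_auxdata(int fd, void *buf, size_t buflen,
				 struct tpacket_auxdata *aux_out)
{
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t len;

	len = recvmsg(fd, &msg, 0);
	if (len < 0)
		return len;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA)
			memcpy(aux_out, CMSG_DATA(cmsg), sizeof(*aux_out));
	}

	return len;
}
#endif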
3070 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3071 int *uaddr_len, int peer)
3072 {
3073 struct net_device *dev;
3074 struct sock *sk = sock->sk;
3075
3076 if (peer)
3077 return -EOPNOTSUPP;
3078
3079 uaddr->sa_family = AF_PACKET;
3080 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3081 rcu_read_lock();
3082 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3083 if (dev)
3084 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3085 rcu_read_unlock();
3086 *uaddr_len = sizeof(*uaddr);
3087
3088 return 0;
3089 }
3090
3091 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3092 int *uaddr_len, int peer)
3093 {
3094 struct net_device *dev;
3095 struct sock *sk = sock->sk;
3096 struct packet_sock *po = pkt_sk(sk);
3097 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3098
3099 if (peer)
3100 return -EOPNOTSUPP;
3101
3102 sll->sll_family = AF_PACKET;
3103 sll->sll_ifindex = po->ifindex;
3104 sll->sll_protocol = po->num;
3105 sll->sll_pkttype = 0;
3106 rcu_read_lock();
3107 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3108 if (dev) {
3109 sll->sll_hatype = dev->type;
3110 sll->sll_halen = dev->addr_len;
3111 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3112 } else {
3113 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3114 sll->sll_halen = 0;
3115 }
3116 rcu_read_unlock();
3117 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3118
3119 return 0;
3120 }
3121
3122 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3123 int what)
3124 {
3125 switch (i->type) {
3126 case PACKET_MR_MULTICAST:
3127 if (i->alen != dev->addr_len)
3128 return -EINVAL;
3129 if (what > 0)
3130 return dev_mc_add(dev, i->addr);
3131 else
3132 return dev_mc_del(dev, i->addr);
3133 break;
3134 case PACKET_MR_PROMISC:
3135 return dev_set_promiscuity(dev, what);
3136 case PACKET_MR_ALLMULTI:
3137 return dev_set_allmulti(dev, what);
3138 case PACKET_MR_UNICAST:
3139 if (i->alen != dev->addr_len)
3140 return -EINVAL;
3141 if (what > 0)
3142 return dev_uc_add(dev, i->addr);
3143 else
3144 return dev_uc_del(dev, i->addr);
3145 break;
3146 default:
3147 break;
3148 }
3149 return 0;
3150 }
3151
3152 static void packet_dev_mclist_delete(struct net_device *dev,
3153 struct packet_mclist **mlp)
3154 {
3155 struct packet_mclist *ml;
3156
3157 while ((ml = *mlp) != NULL) {
3158 if (ml->ifindex == dev->ifindex) {
3159 packet_dev_mc(dev, ml, -1);
3160 *mlp = ml->next;
3161 kfree(ml);
3162 } else
3163 mlp = &ml->next;
3164 }
3165 }
3166
3167 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3168 {
3169 struct packet_sock *po = pkt_sk(sk);
3170 struct packet_mclist *ml, *i;
3171 struct net_device *dev;
3172 int err;
3173
3174 rtnl_lock();
3175
3176 err = -ENODEV;
3177 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3178 if (!dev)
3179 goto done;
3180
3181 err = -EINVAL;
3182 if (mreq->mr_alen > dev->addr_len)
3183 goto done;
3184
3185 err = -ENOBUFS;
3186 i = kmalloc(sizeof(*i), GFP_KERNEL);
3187 if (i == NULL)
3188 goto done;
3189
3190 err = 0;
3191 for (ml = po->mclist; ml; ml = ml->next) {
3192 if (ml->ifindex == mreq->mr_ifindex &&
3193 ml->type == mreq->mr_type &&
3194 ml->alen == mreq->mr_alen &&
3195 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3196 ml->count++;
3197 /* Free the new element ... */
3198 kfree(i);
3199 goto done;
3200 }
3201 }
3202
3203 i->type = mreq->mr_type;
3204 i->ifindex = mreq->mr_ifindex;
3205 i->alen = mreq->mr_alen;
3206 memcpy(i->addr, mreq->mr_address, i->alen);
3207 i->count = 1;
3208 i->next = po->mclist;
3209 po->mclist = i;
3210 err = packet_dev_mc(dev, i, 1);
3211 if (err) {
3212 po->mclist = i->next;
3213 kfree(i);
3214 }
3215
3216 done:
3217 rtnl_unlock();
3218 return err;
3219 }
3220
3221 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3222 {
3223 struct packet_mclist *ml, **mlp;
3224
3225 rtnl_lock();
3226
3227 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3228 if (ml->ifindex == mreq->mr_ifindex &&
3229 ml->type == mreq->mr_type &&
3230 ml->alen == mreq->mr_alen &&
3231 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3232 if (--ml->count == 0) {
3233 struct net_device *dev;
3234 *mlp = ml->next;
3235 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3236 if (dev)
3237 packet_dev_mc(dev, ml, -1);
3238 kfree(ml);
3239 }
3240 break;
3241 }
3242 }
3243 rtnl_unlock();
3244 return 0;
3245 }
3246
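/*
 * Illustrative userspace sketch (not part of the original file; the
 * interface name is an assumption): putting an interface into promiscuous
 * mode through packet_mc_add() above, i.e. via PACKET_ADD_MEMBERSHIP with
 * PACKET_MR_PROMISC rather than an SIOCSIFFLAGS ioctl, so the setting is
 * dropped automatically when the socket closes. Compiled out with #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif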
3247 static void packet_flush_mclist(struct sock *sk)
3248 {
3249 struct packet_sock *po = pkt_sk(sk);
3250 struct packet_mclist *ml;
3251
3252 if (!po->mclist)
3253 return;
3254
3255 rtnl_lock();
3256 while ((ml = po->mclist) != NULL) {
3257 struct net_device *dev;
3258
3259 po->mclist = ml->next;
3260 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3261 if (dev != NULL)
3262 packet_dev_mc(dev, ml, -1);
3263 kfree(ml);
3264 }
3265 rtnl_unlock();
3266 }
3267
3268 static int
3269 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3270 {
3271 struct sock *sk = sock->sk;
3272 struct packet_sock *po = pkt_sk(sk);
3273 int ret;
3274
3275 if (level != SOL_PACKET)
3276 return -ENOPROTOOPT;
3277
3278 switch (optname) {
3279 case PACKET_ADD_MEMBERSHIP:
3280 case PACKET_DROP_MEMBERSHIP:
3281 {
3282 struct packet_mreq_max mreq;
3283 int len = optlen;
3284 memset(&mreq, 0, sizeof(mreq));
3285 if (len < sizeof(struct packet_mreq))
3286 return -EINVAL;
3287 if (len > sizeof(mreq))
3288 len = sizeof(mreq);
3289 if (copy_from_user(&mreq, optval, len))
3290 return -EFAULT;
3291 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3292 return -EINVAL;
3293 if (optname == PACKET_ADD_MEMBERSHIP)
3294 ret = packet_mc_add(sk, &mreq);
3295 else
3296 ret = packet_mc_drop(sk, &mreq);
3297 return ret;
3298 }
3299
3300 case PACKET_RX_RING:
3301 case PACKET_TX_RING:
3302 {
3303 union tpacket_req_u req_u;
3304 int len;
3305
3306 switch (po->tp_version) {
3307 case TPACKET_V1:
3308 case TPACKET_V2:
3309 len = sizeof(req_u.req);
3310 break;
3311 case TPACKET_V3:
3312 default:
3313 len = sizeof(req_u.req3);
3314 break;
3315 }
3316 if (optlen < len)
3317 return -EINVAL;
3318 if (pkt_sk(sk)->has_vnet_hdr)
3319 return -EINVAL;
3320 if (copy_from_user(&req_u.req, optval, len))
3321 return -EFAULT;
3322 return packet_set_ring(sk, &req_u, 0,
3323 optname == PACKET_TX_RING);
3324 }
3325 case PACKET_COPY_THRESH:
3326 {
3327 int val;
3328
3329 if (optlen != sizeof(val))
3330 return -EINVAL;
3331 if (copy_from_user(&val, optval, sizeof(val)))
3332 return -EFAULT;
3333
3334 pkt_sk(sk)->copy_thresh = val;
3335 return 0;
3336 }
3337 case PACKET_VERSION:
3338 {
3339 int val;
3340
3341 if (optlen != sizeof(val))
3342 return -EINVAL;
3343 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3344 return -EBUSY;
3345 if (copy_from_user(&val, optval, sizeof(val)))
3346 return -EFAULT;
3347 switch (val) {
3348 case TPACKET_V1:
3349 case TPACKET_V2:
3350 case TPACKET_V3:
3351 po->tp_version = val;
3352 return 0;
3353 default:
3354 return -EINVAL;
3355 }
3356 }
3357 case PACKET_RESERVE:
3358 {
3359 unsigned int val;
3360
3361 if (optlen != sizeof(val))
3362 return -EINVAL;
3363 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3364 return -EBUSY;
3365 if (copy_from_user(&val, optval, sizeof(val)))
3366 return -EFAULT;
3367 po->tp_reserve = val;
3368 return 0;
3369 }
3370 case PACKET_LOSS:
3371 {
3372 unsigned int val;
3373
3374 if (optlen != sizeof(val))
3375 return -EINVAL;
3376 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3377 return -EBUSY;
3378 if (copy_from_user(&val, optval, sizeof(val)))
3379 return -EFAULT;
3380 po->tp_loss = !!val;
3381 return 0;
3382 }
3383 case PACKET_AUXDATA:
3384 {
3385 int val;
3386
3387 if (optlen < sizeof(val))
3388 return -EINVAL;
3389 if (copy_from_user(&val, optval, sizeof(val)))
3390 return -EFAULT;
3391
3392 po->auxdata = !!val;
3393 return 0;
3394 }
3395 case PACKET_ORIGDEV:
3396 {
3397 int val;
3398
3399 if (optlen < sizeof(val))
3400 return -EINVAL;
3401 if (copy_from_user(&val, optval, sizeof(val)))
3402 return -EFAULT;
3403
3404 po->origdev = !!val;
3405 return 0;
3406 }
3407 case PACKET_VNET_HDR:
3408 {
3409 int val;
3410
3411 if (sock->type != SOCK_RAW)
3412 return -EINVAL;
3413 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3414 return -EBUSY;
3415 if (optlen < sizeof(val))
3416 return -EINVAL;
3417 if (copy_from_user(&val, optval, sizeof(val)))
3418 return -EFAULT;
3419
3420 po->has_vnet_hdr = !!val;
3421 return 0;
3422 }
3423 case PACKET_TIMESTAMP:
3424 {
3425 int val;
3426
3427 if (optlen != sizeof(val))
3428 return -EINVAL;
3429 if (copy_from_user(&val, optval, sizeof(val)))
3430 return -EFAULT;
3431
3432 po->tp_tstamp = val;
3433 return 0;
3434 }
3435 case PACKET_FANOUT:
3436 {
3437 int val;
3438
3439 if (optlen != sizeof(val))
3440 return -EINVAL;
3441 if (copy_from_user(&val, optval, sizeof(val)))
3442 return -EFAULT;
3443
3444 return fanout_add(sk, val & 0xffff, val >> 16);
3445 }
3446 case PACKET_TX_HAS_OFF:
3447 {
3448 unsigned int val;
3449
3450 if (optlen != sizeof(val))
3451 return -EINVAL;
3452 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3453 return -EBUSY;
3454 if (copy_from_user(&val, optval, sizeof(val)))
3455 return -EFAULT;
3456 po->tp_tx_has_off = !!val;
3457 return 0;
3458 }
3459 case PACKET_QDISC_BYPASS:
3460 {
3461 int val;
3462
3463 if (optlen != sizeof(val))
3464 return -EINVAL;
3465 if (copy_from_user(&val, optval, sizeof(val)))
3466 return -EFAULT;
3467
3468 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3469 return 0;
3470 }
3471 default:
3472 return -ENOPROTOOPT;
3473 }
3474 }
3475
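/*
 * Illustrative userspace sketch (not part of the original file; the block
 * and frame sizes are assumptions): the setsockopt() sequence handled
 * above to set up a TPACKET_V3 receive ring - version first, since
 * PACKET_RX_RING reads a struct tpacket_req3 for V3, then the ring
 * request itself. Compiled out with #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int setup_v3_rx_ring(int fd, struct tpacket_req3 *req)
{
	int version = TPACKET_V3;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)) < 0)
		return -1;

	memset(req, 0, sizeof(*req));
	req->tp_block_size = 1 << 22;		/* 4 MiB blocks, page aligned */
	req->tp_block_nr = 64;
	req->tp_frame_size = 2048;		/* multiple of TPACKET_ALIGNMENT */
	req->tp_frame_nr = (req->tp_block_size / req->tp_frame_size) *
			   req->tp_block_nr;
	req->tp_retire_blk_tov = 60;		/* block timeout in ms */

	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
}
#endif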
3476 static int packet_getsockopt(struct socket *sock, int level, int optname,
3477 char __user *optval, int __user *optlen)
3478 {
3479 int len;
3480 int val, lv = sizeof(val);
3481 struct sock *sk = sock->sk;
3482 struct packet_sock *po = pkt_sk(sk);
3483 void *data = &val;
3484 union tpacket_stats_u st;
3485
3486 if (level != SOL_PACKET)
3487 return -ENOPROTOOPT;
3488
3489 if (get_user(len, optlen))
3490 return -EFAULT;
3491
3492 if (len < 0)
3493 return -EINVAL;
3494
3495 switch (optname) {
3496 case PACKET_STATISTICS:
3497 spin_lock_bh(&sk->sk_receive_queue.lock);
3498 memcpy(&st, &po->stats, sizeof(st));
3499 memset(&po->stats, 0, sizeof(po->stats));
3500 spin_unlock_bh(&sk->sk_receive_queue.lock);
3501
3502 if (po->tp_version == TPACKET_V3) {
3503 lv = sizeof(struct tpacket_stats_v3);
3504 st.stats3.tp_packets += st.stats3.tp_drops;
3505 data = &st.stats3;
3506 } else {
3507 lv = sizeof(struct tpacket_stats);
3508 st.stats1.tp_packets += st.stats1.tp_drops;
3509 data = &st.stats1;
3510 }
3511
3512 break;
3513 case PACKET_AUXDATA:
3514 val = po->auxdata;
3515 break;
3516 case PACKET_ORIGDEV:
3517 val = po->origdev;
3518 break;
3519 case PACKET_VNET_HDR:
3520 val = po->has_vnet_hdr;
3521 break;
3522 case PACKET_VERSION:
3523 val = po->tp_version;
3524 break;
3525 case PACKET_HDRLEN:
3526 if (len > sizeof(int))
3527 len = sizeof(int);
3528 if (copy_from_user(&val, optval, len))
3529 return -EFAULT;
3530 switch (val) {
3531 case TPACKET_V1:
3532 val = sizeof(struct tpacket_hdr);
3533 break;
3534 case TPACKET_V2:
3535 val = sizeof(struct tpacket2_hdr);
3536 break;
3537 case TPACKET_V3:
3538 val = sizeof(struct tpacket3_hdr);
3539 break;
3540 default:
3541 return -EINVAL;
3542 }
3543 break;
3544 case PACKET_RESERVE:
3545 val = po->tp_reserve;
3546 break;
3547 case PACKET_LOSS:
3548 val = po->tp_loss;
3549 break;
3550 case PACKET_TIMESTAMP:
3551 val = po->tp_tstamp;
3552 break;
3553 case PACKET_FANOUT:
3554 val = (po->fanout ?
3555 ((u32)po->fanout->id |
3556 ((u32)po->fanout->type << 16) |
3557 ((u32)po->fanout->flags << 24)) :
3558 0);
3559 break;
3560 case PACKET_TX_HAS_OFF:
3561 val = po->tp_tx_has_off;
3562 break;
3563 case PACKET_QDISC_BYPASS:
3564 val = packet_use_direct_xmit(po);
3565 break;
3566 default:
3567 return -ENOPROTOOPT;
3568 }
3569
3570 if (len > lv)
3571 len = lv;
3572 if (put_user(len, optlen))
3573 return -EFAULT;
3574 if (copy_to_user(optval, data, len))
3575 return -EFAULT;
3576 return 0;
3577 }
3578
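/*
 * Illustrative userspace sketch (not part of the original file): reading
 * the counters that packet_getsockopt() above returns and then clears
 * (the statistics are clear-on-read). The V3 layout is assumed here; a
 * TPACKET_V1/V2 socket would read a struct tpacket_stats instead.
 * Compiled out with #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <sys/socket.h>

static int read_stats_v3(int fd, struct tpacket_stats_v3 *st)
{
	socklen_t len = sizeof(*st);

	return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, st, &len);
}
#endif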
3579
3580 static int packet_notifier(struct notifier_block *this,
3581 unsigned long msg, void *ptr)
3582 {
3583 struct sock *sk;
3584 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3585 struct net *net = dev_net(dev);
3586
3587 rcu_read_lock();
3588 sk_for_each_rcu(sk, &net->packet.sklist) {
3589 struct packet_sock *po = pkt_sk(sk);
3590
3591 switch (msg) {
3592 case NETDEV_UNREGISTER:
3593 if (po->mclist)
3594 packet_dev_mclist_delete(dev, &po->mclist);
3595 /* fallthrough */
3596
3597 case NETDEV_DOWN:
3598 if (dev->ifindex == po->ifindex) {
3599 spin_lock(&po->bind_lock);
3600 if (po->running) {
3601 __unregister_prot_hook(sk, false);
3602 sk->sk_err = ENETDOWN;
3603 if (!sock_flag(sk, SOCK_DEAD))
3604 sk->sk_error_report(sk);
3605 }
3606 if (msg == NETDEV_UNREGISTER) {
3607 packet_cached_dev_reset(po);
3608 po->ifindex = -1;
3609 if (po->prot_hook.dev)
3610 dev_put(po->prot_hook.dev);
3611 po->prot_hook.dev = NULL;
3612 }
3613 spin_unlock(&po->bind_lock);
3614 }
3615 break;
3616 case NETDEV_UP:
3617 if (dev->ifindex == po->ifindex) {
3618 spin_lock(&po->bind_lock);
3619 if (po->num)
3620 register_prot_hook(sk);
3621 spin_unlock(&po->bind_lock);
3622 }
3623 break;
3624 }
3625 }
3626 rcu_read_unlock();
3627 return NOTIFY_DONE;
3628 }
3629
3630
3631 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3632 unsigned long arg)
3633 {
3634 struct sock *sk = sock->sk;
3635
3636 switch (cmd) {
3637 case SIOCOUTQ:
3638 {
3639 int amount = sk_wmem_alloc_get(sk);
3640
3641 return put_user(amount, (int __user *)arg);
3642 }
3643 case SIOCINQ:
3644 {
3645 struct sk_buff *skb;
3646 int amount = 0;
3647
3648 spin_lock_bh(&sk->sk_receive_queue.lock);
3649 skb = skb_peek(&sk->sk_receive_queue);
3650 if (skb)
3651 amount = skb->len;
3652 spin_unlock_bh(&sk->sk_receive_queue.lock);
3653 return put_user(amount, (int __user *)arg);
3654 }
3655 case SIOCGSTAMP:
3656 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3657 case SIOCGSTAMPNS:
3658 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3659
3660 #ifdef CONFIG_INET
3661 case SIOCADDRT:
3662 case SIOCDELRT:
3663 case SIOCDARP:
3664 case SIOCGARP:
3665 case SIOCSARP:
3666 case SIOCGIFADDR:
3667 case SIOCSIFADDR:
3668 case SIOCGIFBRDADDR:
3669 case SIOCSIFBRDADDR:
3670 case SIOCGIFNETMASK:
3671 case SIOCSIFNETMASK:
3672 case SIOCGIFDSTADDR:
3673 case SIOCSIFDSTADDR:
3674 case SIOCSIFFLAGS:
3675 return inet_dgram_ops.ioctl(sock, cmd, arg);
3676 #endif
3677
3678 default:
3679 return -ENOIOCTLCMD;
3680 }
3681 return 0;
3682 }
3683
3684 static unsigned int packet_poll(struct file *file, struct socket *sock,
3685 poll_table *wait)
3686 {
3687 struct sock *sk = sock->sk;
3688 struct packet_sock *po = pkt_sk(sk);
3689 unsigned int mask = datagram_poll(file, sock, wait);
3690
3691 spin_lock_bh(&sk->sk_receive_queue.lock);
3692 if (po->rx_ring.pg_vec) {
3693 if (!packet_previous_rx_frame(po, &po->rx_ring,
3694 TP_STATUS_KERNEL))
3695 mask |= POLLIN | POLLRDNORM;
3696 }
3697 spin_unlock_bh(&sk->sk_receive_queue.lock);
3698 spin_lock_bh(&sk->sk_write_queue.lock);
3699 if (po->tx_ring.pg_vec) {
3700 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3701 mask |= POLLOUT | POLLWRNORM;
3702 }
3703 spin_unlock_bh(&sk->sk_write_queue.lock);
3704 return mask;
3705 }
3706
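/*
 * Illustrative userspace sketch (not part of the original file): waiting
 * on the ring states that packet_poll() above reports - POLLIN when an RX
 * ring frame has been handed to userspace, POLLOUT when a TX ring slot is
 * available again. Compiled out with #if 0.
 */
#if 0
#include <poll.h>

static int wait_for_ring(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLOUT,
	};

	return poll(&pfd, 1, timeout_ms);
}
#endif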
3707
3708 /* Dirty? Well, I still have not found a better way to account
3709 * for user mmaps.
3710 */
3711
3712 static void packet_mm_open(struct vm_area_struct *vma)
3713 {
3714 struct file *file = vma->vm_file;
3715 struct socket *sock = file->private_data;
3716 struct sock *sk = sock->sk;
3717
3718 if (sk)
3719 atomic_inc(&pkt_sk(sk)->mapped);
3720 }
3721
3722 static void packet_mm_close(struct vm_area_struct *vma)
3723 {
3724 struct file *file = vma->vm_file;
3725 struct socket *sock = file->private_data;
3726 struct sock *sk = sock->sk;
3727
3728 if (sk)
3729 atomic_dec(&pkt_sk(sk)->mapped);
3730 }
3731
3732 static const struct vm_operations_struct packet_mmap_ops = {
3733 .open = packet_mm_open,
3734 .close = packet_mm_close,
3735 };
3736
3737 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3738 unsigned int len)
3739 {
3740 int i;
3741
3742 for (i = 0; i < len; i++) {
3743 if (likely(pg_vec[i].buffer)) {
3744 if (is_vmalloc_addr(pg_vec[i].buffer))
3745 vfree(pg_vec[i].buffer);
3746 else
3747 free_pages((unsigned long)pg_vec[i].buffer,
3748 order);
3749 pg_vec[i].buffer = NULL;
3750 }
3751 }
3752 kfree(pg_vec);
3753 }
3754
3755 static char *alloc_one_pg_vec_page(unsigned long order)
3756 {
3757 char *buffer;
3758 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3759 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3760
3761 buffer = (char *) __get_free_pages(gfp_flags, order);
3762 if (buffer)
3763 return buffer;
3764
3765 /* __get_free_pages failed, fall back to vmalloc */
3766 buffer = vzalloc((1 << order) * PAGE_SIZE);
3767 if (buffer)
3768 return buffer;
3769
3770 /* vmalloc failed, let's dig into swap here */
3771 gfp_flags &= ~__GFP_NORETRY;
3772 buffer = (char *) __get_free_pages(gfp_flags, order);
3773 if (buffer)
3774 return buffer;
3775
3776 /* complete and utter failure */
3777 return NULL;
3778 }
3779
3780 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3781 {
3782 unsigned int block_nr = req->tp_block_nr;
3783 struct pgv *pg_vec;
3784 int i;
3785
3786 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3787 if (unlikely(!pg_vec))
3788 goto out;
3789
3790 for (i = 0; i < block_nr; i++) {
3791 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3792 if (unlikely(!pg_vec[i].buffer))
3793 goto out_free_pgvec;
3794 }
3795
3796 out:
3797 return pg_vec;
3798
3799 out_free_pgvec:
3800 free_pg_vec(pg_vec, order, block_nr);
3801 pg_vec = NULL;
3802 goto out;
3803 }
3804
3805 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3806 int closing, int tx_ring)
3807 {
3808 struct pgv *pg_vec = NULL;
3809 struct packet_sock *po = pkt_sk(sk);
3810 int was_running, order = 0;
3811 struct packet_ring_buffer *rb;
3812 struct sk_buff_head *rb_queue;
3813 __be16 num;
3814 int err = -EINVAL;
3815 /* Local alias to keep code churn minimal */
3816 struct tpacket_req *req = &req_u->req;
3817
3818 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3819 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3820 WARN(1, "Tx-ring is not supported.\n");
3821 goto out;
3822 }
3823
3824 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3825 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3826
3827 err = -EBUSY;
3828 if (!closing) {
3829 if (atomic_read(&po->mapped))
3830 goto out;
3831 if (packet_read_pending(rb))
3832 goto out;
3833 }
3834
3835 if (req->tp_block_nr) {
3836 /* Sanity tests and some calculations */
3837 err = -EBUSY;
3838 if (unlikely(rb->pg_vec))
3839 goto out;
3840
3841 switch (po->tp_version) {
3842 case TPACKET_V1:
3843 po->tp_hdrlen = TPACKET_HDRLEN;
3844 break;
3845 case TPACKET_V2:
3846 po->tp_hdrlen = TPACKET2_HDRLEN;
3847 break;
3848 case TPACKET_V3:
3849 po->tp_hdrlen = TPACKET3_HDRLEN;
3850 break;
3851 }
3852
3853 err = -EINVAL;
3854 if (unlikely((int)req->tp_block_size <= 0))
3855 goto out;
3856 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3857 goto out;
3858 if (po->tp_version >= TPACKET_V3 &&
3859 (int)(req->tp_block_size -
3860 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
3861 goto out;
3862 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3863 po->tp_reserve))
3864 goto out;
3865 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3866 goto out;
3867
3868 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3869 if (unlikely(rb->frames_per_block <= 0))
3870 goto out;
3871 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3872 req->tp_frame_nr))
3873 goto out;
3874
3875 err = -ENOMEM;
3876 order = get_order(req->tp_block_size);
3877 pg_vec = alloc_pg_vec(req, order);
3878 if (unlikely(!pg_vec))
3879 goto out;
3880 switch (po->tp_version) {
3881 case TPACKET_V3:
3882 /* The transmit path is not supported. We checked
3883 * this above, but be paranoid anyway.
3884 */
3885 if (!tx_ring)
3886 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3887 break;
3888 default:
3889 break;
3890 }
3891 }
3892 /* Done */
3893 else {
3894 err = -EINVAL;
3895 if (unlikely(req->tp_frame_nr))
3896 goto out;
3897 }
3898
3899 lock_sock(sk);
3900
3901 /* Detach socket from network */
3902 spin_lock(&po->bind_lock);
3903 was_running = po->running;
3904 num = po->num;
3905 if (was_running) {
3906 po->num = 0;
3907 __unregister_prot_hook(sk, false);
3908 }
3909 spin_unlock(&po->bind_lock);
3910
3911 synchronize_net();
3912
3913 err = -EBUSY;
3914 mutex_lock(&po->pg_vec_lock);
3915 if (closing || atomic_read(&po->mapped) == 0) {
3916 err = 0;
3917 spin_lock_bh(&rb_queue->lock);
3918 swap(rb->pg_vec, pg_vec);
3919 rb->frame_max = (req->tp_frame_nr - 1);
3920 rb->head = 0;
3921 rb->frame_size = req->tp_frame_size;
3922 spin_unlock_bh(&rb_queue->lock);
3923
3924 swap(rb->pg_vec_order, order);
3925 swap(rb->pg_vec_len, req->tp_block_nr);
3926
3927 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3928 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3929 tpacket_rcv : packet_rcv;
3930 skb_queue_purge(rb_queue);
3931 if (atomic_read(&po->mapped))
3932 pr_err("packet_mmap: vma is busy: %d\n",
3933 atomic_read(&po->mapped));
3934 }
3935 mutex_unlock(&po->pg_vec_lock);
3936
3937 spin_lock(&po->bind_lock);
3938 if (was_running) {
3939 po->num = num;
3940 register_prot_hook(sk);
3941 }
3942 spin_unlock(&po->bind_lock);
3943 if (closing && (po->tp_version > TPACKET_V2)) {
3944 /* Because we don't support block-based V3 on tx-ring */
3945 if (!tx_ring)
3946 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3947 }
3948 release_sock(sk);
3949
3950 if (pg_vec)
3951 free_pg_vec(pg_vec, order, req->tp_block_nr);
3952 out:
3953 return err;
3954 }
3955
3956 static int packet_mmap(struct file *file, struct socket *sock,
3957 struct vm_area_struct *vma)
3958 {
3959 struct sock *sk = sock->sk;
3960 struct packet_sock *po = pkt_sk(sk);
3961 unsigned long size, expected_size;
3962 struct packet_ring_buffer *rb;
3963 unsigned long start;
3964 int err = -EINVAL;
3965 int i;
3966
3967 if (vma->vm_pgoff)
3968 return -EINVAL;
3969
3970 mutex_lock(&po->pg_vec_lock);
3971
3972 expected_size = 0;
3973 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3974 if (rb->pg_vec) {
3975 expected_size += rb->pg_vec_len
3976 * rb->pg_vec_pages
3977 * PAGE_SIZE;
3978 }
3979 }
3980
3981 if (expected_size == 0)
3982 goto out;
3983
3984 size = vma->vm_end - vma->vm_start;
3985 if (size != expected_size)
3986 goto out;
3987
3988 start = vma->vm_start;
3989 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3990 if (rb->pg_vec == NULL)
3991 continue;
3992
3993 for (i = 0; i < rb->pg_vec_len; i++) {
3994 struct page *page;
3995 void *kaddr = rb->pg_vec[i].buffer;
3996 int pg_num;
3997
3998 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3999 page = pgv_to_page(kaddr);
4000 err = vm_insert_page(vma, start, page);
4001 if (unlikely(err))
4002 goto out;
4003 start += PAGE_SIZE;
4004 kaddr += PAGE_SIZE;
4005 }
4006 }
4007 }
4008
4009 atomic_inc(&po->mapped);
4010 vma->vm_ops = &packet_mmap_ops;
4011 err = 0;
4012
4013 out:
4014 mutex_unlock(&po->pg_vec_lock);
4015 return err;
4016 }
4017
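/*
 * Illustrative userspace sketch (not part of the original file): mapping
 * the ring buffers that packet_mmap() above exposes. The mapping must
 * start at offset 0 and be exactly as large as the RX and TX ring
 * requests combined, laid out back to back. Compiled out with #if 0.
 */
#if 0
#include <linux/if_packet.h>
#include <sys/mman.h>

static void *map_rings(int fd, const struct tpacket_req *rx,
		       const struct tpacket_req *tx)
{
	size_t len = (size_t)rx->tp_block_size * rx->tp_block_nr +
		     (size_t)tx->tp_block_size * tx->tp_block_nr;

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif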
4018 static const struct proto_ops packet_ops_spkt = {
4019 .family = PF_PACKET,
4020 .owner = THIS_MODULE,
4021 .release = packet_release,
4022 .bind = packet_bind_spkt,
4023 .connect = sock_no_connect,
4024 .socketpair = sock_no_socketpair,
4025 .accept = sock_no_accept,
4026 .getname = packet_getname_spkt,
4027 .poll = datagram_poll,
4028 .ioctl = packet_ioctl,
4029 .listen = sock_no_listen,
4030 .shutdown = sock_no_shutdown,
4031 .setsockopt = sock_no_setsockopt,
4032 .getsockopt = sock_no_getsockopt,
4033 .sendmsg = packet_sendmsg_spkt,
4034 .recvmsg = packet_recvmsg,
4035 .mmap = sock_no_mmap,
4036 .sendpage = sock_no_sendpage,
4037 };
4038
4039 static const struct proto_ops packet_ops = {
4040 .family = PF_PACKET,
4041 .owner = THIS_MODULE,
4042 .release = packet_release,
4043 .bind = packet_bind,
4044 .connect = sock_no_connect,
4045 .socketpair = sock_no_socketpair,
4046 .accept = sock_no_accept,
4047 .getname = packet_getname,
4048 .poll = packet_poll,
4049 .ioctl = packet_ioctl,
4050 .listen = sock_no_listen,
4051 .shutdown = sock_no_shutdown,
4052 .setsockopt = packet_setsockopt,
4053 .getsockopt = packet_getsockopt,
4054 .sendmsg = packet_sendmsg,
4055 .recvmsg = packet_recvmsg,
4056 .mmap = packet_mmap,
4057 .sendpage = sock_no_sendpage,
4058 };
4059
4060 static const struct net_proto_family packet_family_ops = {
4061 .family = PF_PACKET,
4062 .create = packet_create,
4063 .owner = THIS_MODULE,
4064 };
4065
4066 static struct notifier_block packet_netdev_notifier = {
4067 .notifier_call = packet_notifier,
4068 };
4069
4070 #ifdef CONFIG_PROC_FS
4071
4072 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4073 __acquires(RCU)
4074 {
4075 struct net *net = seq_file_net(seq);
4076
4077 rcu_read_lock();
4078 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4079 }
4080
4081 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4082 {
4083 struct net *net = seq_file_net(seq);
4084 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4085 }
4086
4087 static void packet_seq_stop(struct seq_file *seq, void *v)
4088 __releases(RCU)
4089 {
4090 rcu_read_unlock();
4091 }
4092
4093 static int packet_seq_show(struct seq_file *seq, void *v)
4094 {
4095 if (v == SEQ_START_TOKEN)
4096 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4097 else {
4098 struct sock *s = sk_entry(v);
4099 const struct packet_sock *po = pkt_sk(s);
4100
4101 seq_printf(seq,
4102 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4103 s,
4104 atomic_read(&s->sk_refcnt),
4105 s->sk_type,
4106 ntohs(po->num),
4107 po->ifindex,
4108 po->running,
4109 atomic_read(&s->sk_rmem_alloc),
4110 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4111 sock_i_ino(s));
4112 }
4113
4114 return 0;
4115 }
4116
4117 static const struct seq_operations packet_seq_ops = {
4118 .start = packet_seq_start,
4119 .next = packet_seq_next,
4120 .stop = packet_seq_stop,
4121 .show = packet_seq_show,
4122 };
4123
4124 static int packet_seq_open(struct inode *inode, struct file *file)
4125 {
4126 return seq_open_net(inode, file, &packet_seq_ops,
4127 sizeof(struct seq_net_private));
4128 }
4129
4130 static const struct file_operations packet_seq_fops = {
4131 .owner = THIS_MODULE,
4132 .open = packet_seq_open,
4133 .read = seq_read,
4134 .llseek = seq_lseek,
4135 .release = seq_release_net,
4136 };
4137
4138 #endif
4139
4140 static int __net_init packet_net_init(struct net *net)
4141 {
4142 mutex_init(&net->packet.sklist_lock);
4143 INIT_HLIST_HEAD(&net->packet.sklist);
4144
4145 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4146 return -ENOMEM;
4147
4148 return 0;
4149 }
4150
4151 static void __net_exit packet_net_exit(struct net *net)
4152 {
4153 remove_proc_entry("packet", net->proc_net);
4154 }
4155
4156 static struct pernet_operations packet_net_ops = {
4157 .init = packet_net_init,
4158 .exit = packet_net_exit,
4159 };
4160
4161
4162 static void __exit packet_exit(void)
4163 {
4164 unregister_netdevice_notifier(&packet_netdev_notifier);
4165 unregister_pernet_subsys(&packet_net_ops);
4166 sock_unregister(PF_PACKET);
4167 proto_unregister(&packet_proto);
4168 }
4169
4170 static int __init packet_init(void)
4171 {
4172 int rc = proto_register(&packet_proto, 0);
4173
4174 if (rc != 0)
4175 goto out;
4176
4177 sock_register(&packet_family_ops);
4178 register_pernet_subsys(&packet_net_ops);
4179 register_netdevice_notifier(&packet_netdev_notifier);
4180 out:
4181 return rc;
4182 }
4183
4184 module_init(packet_init);
4185 module_exit(packet_exit);
4186 MODULE_LICENSE("GPL");
4187 MODULE_ALIAS_NETPROTO(PF_PACKET);