1 /*
2 * TUN - Universal TUN/TAP device driver.
3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16 */
17
18 /*
19 * Changes:
20 *
21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22 * Add TUNSETLINK ioctl to set the link encapsulation
23 *
24 * Mark Smith <markzzzsmith@yahoo.com.au>
25 * Use eth_random_addr() for tap MAC address.
26 *
27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
28 * Fixes in packet dropping, queue length setting and queue wakeup.
29 * Increased default tx queue length.
30 * Added ethtool API.
31 * Minor cleanups
32 *
33 * Daniel Podlejski <underley@underley.eu.org>
34 * Modifications for 2.3.99-pre5 kernel.
35 */
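/*
 * Typical userspace usage (an illustrative sketch, not part of the
 * driver; error handling omitted; requires <fcntl.h>, <string.h>,
 * <sys/ioctl.h>, <linux/if.h> and <linux/if_tun.h>):
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * Once TUNSETIFF succeeds, read() on the fd returns packets sent out
 * through the new interface, and write() injects packets as if they
 * had been received on it.
 */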
36
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39 #define DRV_NAME "tun"
40 #define DRV_VERSION "1.6"
41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
42 #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
43
44 #include <linux/module.h>
45 #include <linux/errno.h>
46 #include <linux/kernel.h>
47 #include <linux/major.h>
48 #include <linux/slab.h>
49 #include <linux/poll.h>
50 #include <linux/fcntl.h>
51 #include <linux/init.h>
52 #include <linux/skbuff.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/miscdevice.h>
56 #include <linux/ethtool.h>
57 #include <linux/rtnetlink.h>
58 #include <linux/compat.h>
59 #include <linux/if.h>
60 #include <linux/if_arp.h>
61 #include <linux/if_ether.h>
62 #include <linux/if_tun.h>
63 #include <linux/if_vlan.h>
64 #include <linux/crc32.h>
65 #include <linux/nsproxy.h>
66 #include <linux/virtio_net.h>
67 #include <linux/rcupdate.h>
68 #include <net/net_namespace.h>
69 #include <net/netns/generic.h>
70 #include <net/rtnetlink.h>
71 #include <net/sock.h>
72 #include <linux/seq_file.h>
73 #include <linux/uio.h>
74
75 #include <asm/uaccess.h>
76
77 /* Uncomment to enable debugging */
78 /* #define TUN_DEBUG 1 */
79
80 #ifdef TUN_DEBUG
81 static int debug;
82
83 #define tun_debug(level, tun, fmt, args...) \
84 do { \
85 if (tun->debug) \
86 netdev_printk(level, tun->dev, fmt, ##args); \
87 } while (0)
88 #define DBG1(level, fmt, args...) \
89 do { \
90 if (debug == 2) \
91 printk(level fmt, ##args); \
92 } while (0)
93 #else
94 #define tun_debug(level, tun, fmt, args...) \
95 do { \
96 if (0) \
97 netdev_printk(level, tun->dev, fmt, ##args); \
98 } while (0)
99 #define DBG1(level, fmt, args...) \
100 do { \
101 if (0) \
102 printk(level fmt, ##args); \
103 } while (0)
104 #endif
105
106 /* TUN device flags */
107
108 /* IFF_ATTACH_QUEUE is never stored in device flags,
109 * overload it to mean fasync when stored there.
110 */
111 #define TUN_FASYNC IFF_ATTACH_QUEUE
112 /* High bits in flags field are unused. */
113 #define TUN_VNET_LE 0x80000000
114 #define TUN_VNET_BE 0x40000000
115
116 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
117 IFF_MULTI_QUEUE)
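/* Bytes copied into the skb head for a zerocopy packet when the vnet
 * header does not supply hdr_len (see tun_get_user()). */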
118 #define GOODCOPY_LEN 128
119
120 #define FLT_EXACT_COUNT 8
121 struct tap_filter {
122 unsigned int count; /* Number of addrs. Zero means disabled */
123 u32 mask[2]; /* Mask of the hashed addrs */
124 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
125 };
126
127 /* MAX_TAP_QUEUES 256 is chosen to allow the number of rx/tx queues to
128  * match the maximum number of VCPUs in a guest. */
129 #define MAX_TAP_QUEUES 256
130 #define MAX_TAP_FLOWS 4096
131
132 #define TUN_FLOW_EXPIRE (3 * HZ)
133
134 struct tun_pcpu_stats {
135 u64 rx_packets;
136 u64 rx_bytes;
137 u64 tx_packets;
138 u64 tx_bytes;
139 struct u64_stats_sync syncp;
140 u32 rx_dropped;
141 u32 tx_dropped;
142 u32 rx_frame_errors;
143 };
144
145 /* A tun_file connects an open character device to a tuntap netdevice. It
146  * also contains all socket-related structures (except sock_fprog and
147  * tap_filter) so that it can serve as one transmit queue for the tuntap
148  * device. The sock_fprog and tap_filter are kept in tun_struct since they
149  * are used to filter for the netdevice as a whole, not for any specific
150  * queue.
151 *
152 * RCU usage:
153 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
154 * other can only be read while rcu_read_lock or rtnl_lock is held.
155 */
156 struct tun_file {
157 struct sock sk;
158 struct socket socket;
159 struct socket_wq wq;
160 struct tun_struct __rcu *tun;
161 struct fasync_struct *fasync;
162 	/* only used for fasync */
163 unsigned int flags;
164 union {
165 u16 queue_index;
166 unsigned int ifindex;
167 };
168 struct list_head next;
169 struct tun_struct *detached;
170 };
171
172 struct tun_flow_entry {
173 struct hlist_node hash_link;
174 struct rcu_head rcu;
175 struct tun_struct *tun;
176
177 u32 rxhash;
178 u32 rps_rxhash;
179 int queue_index;
180 unsigned long updated;
181 };
182
183 #define TUN_NUM_FLOW_ENTRIES 1024
184
185 /* Since the socket was moved into tun_file, the socket filter, sndbuf and
186  * vnet header size are restored when a file is attached to a persistent
187  * device, preserving the behavior of persistent devices.
188  */
189 struct tun_struct {
190 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
191 unsigned int numqueues;
192 unsigned int flags;
193 kuid_t owner;
194 kgid_t group;
195
196 struct net_device *dev;
197 netdev_features_t set_features;
198 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
199 NETIF_F_TSO6|NETIF_F_UFO)
200
201 int align;
202 int vnet_hdr_sz;
203 int sndbuf;
204 struct tap_filter txflt;
205 struct sock_fprog fprog;
206 /* protected by rtnl lock */
207 bool filter_attached;
208 #ifdef TUN_DEBUG
209 int debug;
210 #endif
211 spinlock_t lock;
212 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
213 struct timer_list flow_gc_timer;
214 unsigned long ageing_time;
215 unsigned int numdisabled;
216 struct list_head disabled;
217 void *security;
218 u32 flow_count;
219 struct tun_pcpu_stats __percpu *pcpu_stats;
220 };
221
222 #ifdef CONFIG_TUN_VNET_CROSS_LE
223 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
224 {
225 return tun->flags & TUN_VNET_BE ? false :
226 virtio_legacy_is_little_endian();
227 }
228
229 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
230 {
231 int be = !!(tun->flags & TUN_VNET_BE);
232
233 if (put_user(be, argp))
234 return -EFAULT;
235
236 return 0;
237 }
238
239 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
240 {
241 int be;
242
243 if (get_user(be, argp))
244 return -EFAULT;
245
246 if (be)
247 tun->flags |= TUN_VNET_BE;
248 else
249 tun->flags &= ~TUN_VNET_BE;
250
251 return 0;
252 }
253 #else
254 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
255 {
256 return virtio_legacy_is_little_endian();
257 }
258
259 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
260 {
261 return -EINVAL;
262 }
263
264 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
265 {
266 return -EINVAL;
267 }
268 #endif /* CONFIG_TUN_VNET_CROSS_LE */
269
270 static inline bool tun_is_little_endian(struct tun_struct *tun)
271 {
272 return tun->flags & TUN_VNET_LE ||
273 tun_legacy_is_little_endian(tun);
274 }
275
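/* Convert 16-bit virtio header fields between guest and CPU byte order,
 * honouring the endianness negotiated via TUN_VNET_LE/TUN_VNET_BE (see
 * tun_is_little_endian() above). */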
276 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
277 {
278 return __virtio16_to_cpu(tun_is_little_endian(tun), val);
279 }
280
281 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
282 {
283 return __cpu_to_virtio16(tun_is_little_endian(tun), val);
284 }
285
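/* Map an rxhash into one of TUN_NUM_FLOW_ENTRIES (1024) hash buckets;
 * 0x3ff is TUN_NUM_FLOW_ENTRIES - 1. */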
286 static inline u32 tun_hashfn(u32 rxhash)
287 {
288 return rxhash & 0x3ff;
289 }
290
291 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
292 {
293 struct tun_flow_entry *e;
294
295 hlist_for_each_entry_rcu(e, head, hash_link) {
296 if (e->rxhash == rxhash)
297 return e;
298 }
299 return NULL;
300 }
301
302 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
303 struct hlist_head *head,
304 u32 rxhash, u16 queue_index)
305 {
306 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
307
308 if (e) {
309 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
310 rxhash, queue_index);
311 e->updated = jiffies;
312 e->rxhash = rxhash;
313 e->rps_rxhash = 0;
314 e->queue_index = queue_index;
315 e->tun = tun;
316 hlist_add_head_rcu(&e->hash_link, head);
317 ++tun->flow_count;
318 }
319 return e;
320 }
321
322 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
323 {
324 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
325 e->rxhash, e->queue_index);
326 hlist_del_rcu(&e->hash_link);
327 kfree_rcu(e, rcu);
328 --tun->flow_count;
329 }
330
331 static void tun_flow_flush(struct tun_struct *tun)
332 {
333 int i;
334
335 spin_lock_bh(&tun->lock);
336 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
337 struct tun_flow_entry *e;
338 struct hlist_node *n;
339
340 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
341 tun_flow_delete(tun, e);
342 }
343 spin_unlock_bh(&tun->lock);
344 }
345
346 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
347 {
348 int i;
349
350 spin_lock_bh(&tun->lock);
351 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
352 struct tun_flow_entry *e;
353 struct hlist_node *n;
354
355 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
356 if (e->queue_index == queue_index)
357 tun_flow_delete(tun, e);
358 }
359 }
360 spin_unlock_bh(&tun->lock);
361 }
362
363 static void tun_flow_cleanup(unsigned long data)
364 {
365 struct tun_struct *tun = (struct tun_struct *)data;
366 unsigned long delay = tun->ageing_time;
367 unsigned long next_timer = jiffies + delay;
368 unsigned long count = 0;
369 int i;
370
371 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
372
373 spin_lock_bh(&tun->lock);
374 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
375 struct tun_flow_entry *e;
376 struct hlist_node *n;
377
378 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
379 unsigned long this_timer;
380 count++;
381 this_timer = e->updated + delay;
382 if (time_before_eq(this_timer, jiffies))
383 tun_flow_delete(tun, e);
384 else if (time_before(this_timer, next_timer))
385 next_timer = this_timer;
386 }
387 }
388
389 if (count)
390 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
391 spin_unlock_bh(&tun->lock);
392 }
393
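/* Record that packets with this rxhash were last received on tfile's
 * queue, so that tun_select_queue() can keep transmitting the flow on
 * the same queue. */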
394 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
395 struct tun_file *tfile)
396 {
397 struct hlist_head *head;
398 struct tun_flow_entry *e;
399 unsigned long delay = tun->ageing_time;
400 u16 queue_index = tfile->queue_index;
401
402 if (!rxhash)
403 return;
404 else
405 head = &tun->flows[tun_hashfn(rxhash)];
406
407 rcu_read_lock();
408
409 	/* There is a very small possibility of out-of-order delivery while a
410 	 * flow switches queues; it is not worth optimizing for. */
411 if (tun->numqueues == 1 || tfile->detached)
412 goto unlock;
413
414 e = tun_flow_find(head, rxhash);
415 if (likely(e)) {
416 /* TODO: keep queueing to old queue until it's empty? */
417 e->queue_index = queue_index;
418 e->updated = jiffies;
419 sock_rps_record_flow_hash(e->rps_rxhash);
420 } else {
421 spin_lock_bh(&tun->lock);
422 if (!tun_flow_find(head, rxhash) &&
423 tun->flow_count < MAX_TAP_FLOWS)
424 tun_flow_create(tun, head, rxhash, queue_index);
425
426 if (!timer_pending(&tun->flow_gc_timer))
427 mod_timer(&tun->flow_gc_timer,
428 round_jiffies_up(jiffies + delay));
429 spin_unlock_bh(&tun->lock);
430 }
431
432 unlock:
433 rcu_read_unlock();
434 }
435
436 /*
437 * Save the hash received in the stack receive path and update the
438 * flow_hash table accordingly.
439 */
440 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
441 {
442 if (unlikely(e->rps_rxhash != hash))
443 e->rps_rxhash = hash;
444 }
445
446 /* We try to identify a flow through its rxhash first. We do not check the
447  * rx queue number because some NICs (e.g. the 82599) choose the rx queue
448  * based on the tx queue on which the last packet of the flow was sent. As
449  * the userspace application moves between processors, we may therefore see
450  * a different rx queue number here. Only if no rxhash is available do we
451  * fall back to the recorded rx queue number.
452  */
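/* Note: ((u64)hash * numqueues) >> 32 below maps a 32-bit hash uniformly
 * onto [0, numqueues) without an expensive division. */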
453 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
454 void *accel_priv, select_queue_fallback_t fallback)
455 {
456 struct tun_struct *tun = netdev_priv(dev);
457 struct tun_flow_entry *e;
458 u32 txq = 0;
459 u32 numqueues = 0;
460
461 rcu_read_lock();
462 numqueues = ACCESS_ONCE(tun->numqueues);
463
464 txq = skb_get_hash(skb);
465 if (txq) {
466 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
467 if (e) {
468 tun_flow_save_rps_rxhash(e, txq);
469 txq = e->queue_index;
470 } else
471 /* use multiply and shift instead of expensive divide */
472 txq = ((u64)txq * numqueues) >> 32;
473 } else if (likely(skb_rx_queue_recorded(skb))) {
474 txq = skb_get_rx_queue(skb);
475 while (unlikely(txq >= numqueues))
476 txq -= numqueues;
477 }
478
479 rcu_read_unlock();
480 return txq;
481 }
482
483 static inline bool tun_not_capable(struct tun_struct *tun)
484 {
485 const struct cred *cred = current_cred();
486 struct net *net = dev_net(tun->dev);
487
488 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
489 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
490 !ns_capable(net->user_ns, CAP_NET_ADMIN);
491 }
492
493 static void tun_set_real_num_queues(struct tun_struct *tun)
494 {
495 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
496 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
497 }
498
499 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
500 {
501 tfile->detached = tun;
502 list_add_tail(&tfile->next, &tun->disabled);
503 ++tun->numdisabled;
504 }
505
506 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
507 {
508 struct tun_struct *tun = tfile->detached;
509
510 tfile->detached = NULL;
511 list_del_init(&tfile->next);
512 --tun->numdisabled;
513 return tun;
514 }
515
516 static void tun_queue_purge(struct tun_file *tfile)
517 {
518 skb_queue_purge(&tfile->sk.sk_receive_queue);
519 skb_queue_purge(&tfile->sk.sk_error_queue);
520 }
521
522 static void __tun_detach(struct tun_file *tfile, bool clean)
523 {
524 struct tun_file *ntfile;
525 struct tun_struct *tun;
526
527 tun = rtnl_dereference(tfile->tun);
528
529 if (tun && !tfile->detached) {
530 u16 index = tfile->queue_index;
531 BUG_ON(index >= tun->numqueues);
532
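		/* Fill the hole left by this queue with the last tfile in
		 * the array, then shrink numqueues. */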
533 rcu_assign_pointer(tun->tfiles[index],
534 tun->tfiles[tun->numqueues - 1]);
535 ntfile = rtnl_dereference(tun->tfiles[index]);
536 ntfile->queue_index = index;
537
538 --tun->numqueues;
539 if (clean) {
540 RCU_INIT_POINTER(tfile->tun, NULL);
541 sock_put(&tfile->sk);
542 } else
543 tun_disable_queue(tun, tfile);
544
545 synchronize_net();
546 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
547 /* Drop read queue */
548 tun_queue_purge(tfile);
549 tun_set_real_num_queues(tun);
550 } else if (tfile->detached && clean) {
551 tun = tun_enable_queue(tfile);
552 sock_put(&tfile->sk);
553 }
554
555 if (clean) {
556 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
557 netif_carrier_off(tun->dev);
558
559 if (!(tun->flags & IFF_PERSIST) &&
560 tun->dev->reg_state == NETREG_REGISTERED)
561 unregister_netdevice(tun->dev);
562 }
563 sock_put(&tfile->sk);
564 }
565 }
566
567 static void tun_detach(struct tun_file *tfile, bool clean)
568 {
569 rtnl_lock();
570 __tun_detach(tfile, clean);
571 rtnl_unlock();
572 }
573
574 static void tun_detach_all(struct net_device *dev)
575 {
576 struct tun_struct *tun = netdev_priv(dev);
577 struct tun_file *tfile, *tmp;
578 int i, n = tun->numqueues;
579
580 for (i = 0; i < n; i++) {
581 tfile = rtnl_dereference(tun->tfiles[i]);
582 BUG_ON(!tfile);
583 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
584 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
585 RCU_INIT_POINTER(tfile->tun, NULL);
586 --tun->numqueues;
587 }
588 list_for_each_entry(tfile, &tun->disabled, next) {
589 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
590 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
591 RCU_INIT_POINTER(tfile->tun, NULL);
592 }
593 BUG_ON(tun->numqueues != 0);
594
595 synchronize_net();
596 for (i = 0; i < n; i++) {
597 tfile = rtnl_dereference(tun->tfiles[i]);
598 /* Drop read queue */
599 tun_queue_purge(tfile);
600 sock_put(&tfile->sk);
601 }
602 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
603 tun_enable_queue(tfile);
604 tun_queue_purge(tfile);
605 sock_put(&tfile->sk);
606 }
607 BUG_ON(tun->numdisabled != 0);
608
609 if (tun->flags & IFF_PERSIST)
610 module_put(THIS_MODULE);
611 }
612
613 static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
614 {
615 struct tun_file *tfile = file->private_data;
616 int err;
617
618 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
619 if (err < 0)
620 goto out;
621
622 err = -EINVAL;
623 if (rtnl_dereference(tfile->tun) && !tfile->detached)
624 goto out;
625
626 err = -EBUSY;
627 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
628 goto out;
629
630 err = -E2BIG;
631 if (!tfile->detached &&
632 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
633 goto out;
634
635 err = 0;
636
637 /* Re-attach the filter to persist device */
638 if (!skip_filter && (tun->filter_attached == true)) {
639 lock_sock(tfile->socket.sk);
640 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
641 release_sock(tfile->socket.sk);
642 		if (err < 0)
643 goto out;
644 }
645 tfile->queue_index = tun->numqueues;
646 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
647 rcu_assign_pointer(tfile->tun, tun);
648 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
649 tun->numqueues++;
650
651 if (tfile->detached)
652 tun_enable_queue(tfile);
653 else
654 sock_hold(&tfile->sk);
655
656 tun_set_real_num_queues(tun);
657
658 /* device is allowed to go away first, so no need to hold extra
659 * refcnt.
660 */
661
662 out:
663 return err;
664 }
665
666 static struct tun_struct *__tun_get(struct tun_file *tfile)
667 {
668 struct tun_struct *tun;
669
670 rcu_read_lock();
671 tun = rcu_dereference(tfile->tun);
672 if (tun)
673 dev_hold(tun->dev);
674 rcu_read_unlock();
675
676 return tun;
677 }
678
679 static struct tun_struct *tun_get(struct file *file)
680 {
681 return __tun_get(file->private_data);
682 }
683
684 static void tun_put(struct tun_struct *tun)
685 {
686 dev_put(tun->dev);
687 }
688
689 /* TAP filtering */
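/* Multicast filtering uses the top six bits of the Ethernet CRC of the
 * address as an index into a 64-bit hash mask, much like many NICs do
 * in hardware. */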
690 static void addr_hash_set(u32 *mask, const u8 *addr)
691 {
692 int n = ether_crc(ETH_ALEN, addr) >> 26;
693 mask[n >> 5] |= (1 << (n & 31));
694 }
695
696 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
697 {
698 int n = ether_crc(ETH_ALEN, addr) >> 26;
699 return mask[n >> 5] & (1 << (n & 31));
700 }
701
702 static int update_filter(struct tap_filter *filter, void __user *arg)
703 {
704 struct { u8 u[ETH_ALEN]; } *addr;
705 struct tun_filter uf;
706 int err, alen, n, nexact;
707
708 if (copy_from_user(&uf, arg, sizeof(uf)))
709 return -EFAULT;
710
711 if (!uf.count) {
712 /* Disabled */
713 filter->count = 0;
714 return 0;
715 }
716
717 alen = ETH_ALEN * uf.count;
718 addr = kmalloc(alen, GFP_KERNEL);
719 if (!addr)
720 return -ENOMEM;
721
722 if (copy_from_user(addr, arg + sizeof(uf), alen)) {
723 err = -EFAULT;
724 goto done;
725 }
726
727 	/* The filter is updated without holding any locks, which is
728 	 * perfectly safe: we disable it first, and in the worst
729 	 * case we'll accept a few undesired packets. */
730 filter->count = 0;
731 wmb();
732
733 /* Use first set of addresses as an exact filter */
734 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
735 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
736
737 nexact = n;
738
739 	/* Remaining multicast addresses are hashed; any
740 	 * unicast address here leaves the filter disabled. */
741 memset(filter->mask, 0, sizeof(filter->mask));
742 for (; n < uf.count; n++) {
743 if (!is_multicast_ether_addr(addr[n].u)) {
744 err = 0; /* no filter */
745 goto done;
746 }
747 addr_hash_set(filter->mask, addr[n].u);
748 }
749
750 /* For ALLMULTI just set the mask to all ones.
751 * This overrides the mask populated above. */
752 if ((uf.flags & TUN_FLT_ALLMULTI))
753 memset(filter->mask, ~0, sizeof(filter->mask));
754
755 /* Now enable the filter */
756 wmb();
757 filter->count = nexact;
758
759 /* Return the number of exact filters */
760 err = nexact;
761
762 done:
763 kfree(addr);
764 return err;
765 }
766
767 /* Returns: 0 - drop, !=0 - accept */
768 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
769 {
770 	/* Cannot use eth_hdr(skb) here because the mac header has not been
771 	 * set up at this point. */
772 struct ethhdr *eh = (struct ethhdr *) skb->data;
773 int i;
774
775 /* Exact match */
776 for (i = 0; i < filter->count; i++)
777 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
778 return 1;
779
780 /* Inexact match (multicast only) */
781 if (is_multicast_ether_addr(eh->h_dest))
782 return addr_hash_test(filter->mask, eh->h_dest);
783
784 return 0;
785 }
786
787 /*
788 * Checks whether the packet is accepted or not.
789 * Returns: 0 - drop, !=0 - accept
790 */
791 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
792 {
793 if (!filter->count)
794 return 1;
795
796 return run_filter(filter, skb);
797 }
798
799 /* Network device part of the driver */
800
801 static const struct ethtool_ops tun_ethtool_ops;
802
803 /* Net device detach from fd. */
804 static void tun_net_uninit(struct net_device *dev)
805 {
806 tun_detach_all(dev);
807 }
808
809 /* Net device open. */
810 static int tun_net_open(struct net_device *dev)
811 {
812 netif_tx_start_all_queues(dev);
813 return 0;
814 }
815
816 /* Net device close. */
817 static int tun_net_close(struct net_device *dev)
818 {
819 netif_tx_stop_all_queues(dev);
820 return 0;
821 }
822
823 /* Net device start xmit */
824 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
825 {
826 struct tun_struct *tun = netdev_priv(dev);
827 int txq = skb->queue_mapping;
828 struct tun_file *tfile;
829 u32 numqueues = 0;
830
831 rcu_read_lock();
832 tfile = rcu_dereference(tun->tfiles[txq]);
833 numqueues = ACCESS_ONCE(tun->numqueues);
834
835 /* Drop packet if interface is not attached */
836 if (txq >= numqueues)
837 goto drop;
838
839 #ifdef CONFIG_RPS
840 if (numqueues == 1 && static_key_false(&rps_needed)) {
841 /* Select queue was not called for the skbuff, so we extract the
842 * RPS hash and save it into the flow_table here.
843 */
844 __u32 rxhash;
845
846 rxhash = skb_get_hash(skb);
847 if (rxhash) {
848 struct tun_flow_entry *e;
849 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
850 rxhash);
851 if (e)
852 tun_flow_save_rps_rxhash(e, rxhash);
853 }
854 }
855 #endif
856
857 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
858
859 BUG_ON(!tfile);
860
861 /* Drop if the filter does not like it.
862 * This is a noop if the filter is disabled.
863 * Filter can be enabled only for the TAP devices. */
864 if (!check_filter(&tun->txflt, skb))
865 goto drop;
866
867 if (tfile->socket.sk->sk_filter &&
868 sk_filter(tfile->socket.sk, skb))
869 goto drop;
870
871 	/* Limit the number of packets queued by dividing the tx queue length
872 	 * by the number of queues.
873 	 */
874 if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
875 >= dev->tx_queue_len)
876 goto drop;
877
878 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
879 goto drop;
880
881 if (skb->sk && sk_fullsock(skb->sk)) {
882 sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
883 &skb_shinfo(skb)->tx_flags);
884 sw_tx_timestamp(skb);
885 }
886
887 	/* Orphan the skb - required because we might hang on to it
888 	 * for an indefinite time.
889 	 */
890 skb_orphan(skb);
891
892 nf_reset(skb);
893
894 /* Enqueue packet */
895 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
896
897 /* Notify and wake up reader process */
898 if (tfile->flags & TUN_FASYNC)
899 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
900 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
901
902 rcu_read_unlock();
903 return NETDEV_TX_OK;
904
905 drop:
906 this_cpu_inc(tun->pcpu_stats->tx_dropped);
907 skb_tx_error(skb);
908 kfree_skb(skb);
909 rcu_read_unlock();
910 return NET_XMIT_DROP;
911 }
912
913 static void tun_net_mclist(struct net_device *dev)
914 {
915 /*
916 * This callback is supposed to deal with mc filter in
917 * _rx_ path and has nothing to do with the _tx_ path.
918 * In rx path we always accept everything userspace gives us.
919 */
920 }
921
922 #define MIN_MTU 68
923 #define MAX_MTU 65535
924
925 static int
926 tun_net_change_mtu(struct net_device *dev, int new_mtu)
927 {
928 if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
929 return -EINVAL;
930 dev->mtu = new_mtu;
931 return 0;
932 }
933
934 static netdev_features_t tun_net_fix_features(struct net_device *dev,
935 netdev_features_t features)
936 {
937 struct tun_struct *tun = netdev_priv(dev);
938
939 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
940 }
941 #ifdef CONFIG_NET_POLL_CONTROLLER
942 static void tun_poll_controller(struct net_device *dev)
943 {
944 /*
945 * Tun only receives frames when:
946 * 1) the char device endpoint gets data from user space
947 * 2) the tun socket gets a sendmsg call from user space
948 * Since both of those are synchronous operations, we are guaranteed
949 * never to have pending data when we poll for it
950 * so there is nothing to do here but return.
951 * We need this though so netpoll recognizes us as an interface that
952 * supports polling, which enables bridge devices in virt setups to
953 * still use netconsole
954 */
955 return;
956 }
957 #endif
958
959 static void tun_set_headroom(struct net_device *dev, int new_hr)
960 {
961 struct tun_struct *tun = netdev_priv(dev);
962
963 if (new_hr < NET_SKB_PAD)
964 new_hr = NET_SKB_PAD;
965
966 tun->align = new_hr;
967 }
968
969 static struct rtnl_link_stats64 *
970 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
971 {
972 u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
973 struct tun_struct *tun = netdev_priv(dev);
974 struct tun_pcpu_stats *p;
975 int i;
976
977 for_each_possible_cpu(i) {
978 u64 rxpackets, rxbytes, txpackets, txbytes;
979 unsigned int start;
980
981 p = per_cpu_ptr(tun->pcpu_stats, i);
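		/* Snapshot the 64-bit counters; retry if a writer updated
		 * them while we were reading (see u64_stats_sync). */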
982 do {
983 start = u64_stats_fetch_begin(&p->syncp);
984 rxpackets = p->rx_packets;
985 rxbytes = p->rx_bytes;
986 txpackets = p->tx_packets;
987 txbytes = p->tx_bytes;
988 } while (u64_stats_fetch_retry(&p->syncp, start));
989
990 stats->rx_packets += rxpackets;
991 stats->rx_bytes += rxbytes;
992 stats->tx_packets += txpackets;
993 stats->tx_bytes += txbytes;
994
995 /* u32 counters */
996 rx_dropped += p->rx_dropped;
997 rx_frame_errors += p->rx_frame_errors;
998 tx_dropped += p->tx_dropped;
999 }
1000 stats->rx_dropped = rx_dropped;
1001 stats->rx_frame_errors = rx_frame_errors;
1002 stats->tx_dropped = tx_dropped;
1003 return stats;
1004 }
1005
1006 static const struct net_device_ops tun_netdev_ops = {
1007 .ndo_uninit = tun_net_uninit,
1008 .ndo_open = tun_net_open,
1009 .ndo_stop = tun_net_close,
1010 .ndo_start_xmit = tun_net_xmit,
1011 .ndo_change_mtu = tun_net_change_mtu,
1012 .ndo_fix_features = tun_net_fix_features,
1013 .ndo_select_queue = tun_select_queue,
1014 #ifdef CONFIG_NET_POLL_CONTROLLER
1015 .ndo_poll_controller = tun_poll_controller,
1016 #endif
1017 .ndo_set_rx_headroom = tun_set_headroom,
1018 .ndo_get_stats64 = tun_net_get_stats64,
1019 };
1020
1021 static const struct net_device_ops tap_netdev_ops = {
1022 .ndo_uninit = tun_net_uninit,
1023 .ndo_open = tun_net_open,
1024 .ndo_stop = tun_net_close,
1025 .ndo_start_xmit = tun_net_xmit,
1026 .ndo_change_mtu = tun_net_change_mtu,
1027 .ndo_fix_features = tun_net_fix_features,
1028 .ndo_set_rx_mode = tun_net_mclist,
1029 .ndo_set_mac_address = eth_mac_addr,
1030 .ndo_validate_addr = eth_validate_addr,
1031 .ndo_select_queue = tun_select_queue,
1032 #ifdef CONFIG_NET_POLL_CONTROLLER
1033 .ndo_poll_controller = tun_poll_controller,
1034 #endif
1035 .ndo_features_check = passthru_features_check,
1036 .ndo_set_rx_headroom = tun_set_headroom,
1037 .ndo_get_stats64 = tun_net_get_stats64,
1038 };
1039
1040 static void tun_flow_init(struct tun_struct *tun)
1041 {
1042 int i;
1043
1044 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1045 INIT_HLIST_HEAD(&tun->flows[i]);
1046
1047 tun->ageing_time = TUN_FLOW_EXPIRE;
1048 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
1049 mod_timer(&tun->flow_gc_timer,
1050 round_jiffies_up(jiffies + tun->ageing_time));
1051 }
1052
1053 static void tun_flow_uninit(struct tun_struct *tun)
1054 {
1055 del_timer_sync(&tun->flow_gc_timer);
1056 tun_flow_flush(tun);
1057 }
1058
1059 /* Initialize net device. */
1060 static void tun_net_init(struct net_device *dev)
1061 {
1062 struct tun_struct *tun = netdev_priv(dev);
1063
1064 switch (tun->flags & TUN_TYPE_MASK) {
1065 case IFF_TUN:
1066 dev->netdev_ops = &tun_netdev_ops;
1067
1068 /* Point-to-Point TUN Device */
1069 dev->hard_header_len = 0;
1070 dev->addr_len = 0;
1071 dev->mtu = 1500;
1072
1073 /* Zero header length */
1074 dev->type = ARPHRD_NONE;
1075 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1076 break;
1077
1078 case IFF_TAP:
1079 dev->netdev_ops = &tap_netdev_ops;
1080 /* Ethernet TAP Device */
1081 ether_setup(dev);
1082 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1083 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1084
1085 eth_hw_addr_random(dev);
1086
1087 break;
1088 }
1089 }
1090
1091 /* Character device part */
1092
1093 /* Poll */
1094 static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1095 {
1096 struct tun_file *tfile = file->private_data;
1097 struct tun_struct *tun = __tun_get(tfile);
1098 struct sock *sk;
1099 unsigned int mask = 0;
1100
1101 if (!tun)
1102 return POLLERR;
1103
1104 sk = tfile->socket.sk;
1105
1106 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1107
1108 poll_wait(file, sk_sleep(sk), wait);
1109
1110 if (!skb_queue_empty(&sk->sk_receive_queue))
1111 mask |= POLLIN | POLLRDNORM;
1112
1113 if (sock_writeable(sk) ||
1114 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1115 sock_writeable(sk)))
1116 mask |= POLLOUT | POLLWRNORM;
1117
1118 if (tun->dev->reg_state != NETREG_REGISTERED)
1119 mask = POLLERR;
1120
1121 tun_put(tun);
1122 return mask;
1123 }
1124
1125 /* prepad is the amount to reserve at front. len is length after that.
1126 * linear is a hint as to how much to copy (usually headers). */
1127 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1128 size_t prepad, size_t len,
1129 size_t linear, int noblock)
1130 {
1131 struct sock *sk = tfile->socket.sk;
1132 struct sk_buff *skb;
1133 int err;
1134
1135 /* Under a page? Don't bother with paged skb. */
1136 if (prepad + len < PAGE_SIZE || !linear)
1137 linear = len;
1138
1139 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1140 &err, 0);
1141 if (!skb)
1142 return ERR_PTR(err);
1143
1144 skb_reserve(skb, prepad);
1145 skb_put(skb, linear);
1146 skb->data_len = len - linear;
1147 skb->len += len - linear;
1148
1149 return skb;
1150 }
1151
1152 /* Get packet from user space buffer */
1153 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1154 void *msg_control, struct iov_iter *from,
1155 int noblock)
1156 {
1157 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1158 struct sk_buff *skb;
1159 size_t total_len = iov_iter_count(from);
1160 size_t len = total_len, align = tun->align, linear;
1161 struct virtio_net_hdr gso = { 0 };
1162 struct tun_pcpu_stats *stats;
1163 int good_linear;
1164 int copylen;
1165 bool zerocopy = false;
1166 int err;
1167 u32 rxhash;
1168 ssize_t n;
1169
1170 if (!(tun->dev->flags & IFF_UP))
1171 return -EIO;
1172
1173 if (!(tun->flags & IFF_NO_PI)) {
1174 if (len < sizeof(pi))
1175 return -EINVAL;
1176 len -= sizeof(pi);
1177
1178 n = copy_from_iter(&pi, sizeof(pi), from);
1179 if (n != sizeof(pi))
1180 return -EFAULT;
1181 }
1182
1183 if (tun->flags & IFF_VNET_HDR) {
1184 if (len < tun->vnet_hdr_sz)
1185 return -EINVAL;
1186 len -= tun->vnet_hdr_sz;
1187
1188 n = copy_from_iter(&gso, sizeof(gso), from);
1189 if (n != sizeof(gso))
1190 return -EFAULT;
1191
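		/* If the header claims a partial checksum, make sure hdr_len
		 * covers the checksum field at csum_start + csum_offset
		 * (+ 2 for the 16-bit checksum value itself). */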
1192 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1193 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1194 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1195
1196 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1197 return -EINVAL;
1198 iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
1199 }
1200
1201 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1202 align += NET_IP_ALIGN;
1203 if (unlikely(len < ETH_HLEN ||
1204 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1205 return -EINVAL;
1206 }
1207
1208 good_linear = SKB_MAX_HEAD(align);
1209
1210 if (msg_control) {
1211 struct iov_iter i = *from;
1212
1213 		/* The first copylen bytes (gso.hdr_len if set, GOODCOPY_LEN
1214 		 * otherwise, capped below) are copied into the skb head, so
1215 		 * there is enough room to expand the head if that is needed.
1216 		 * The rest of the buffer is mapped from userspace.
1217 		 */
1217 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1218 if (copylen > good_linear)
1219 copylen = good_linear;
1220 linear = copylen;
1221 iov_iter_advance(&i, copylen);
1222 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1223 zerocopy = true;
1224 }
1225
1226 if (!zerocopy) {
1227 copylen = len;
1228 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1229 linear = good_linear;
1230 else
1231 linear = tun16_to_cpu(tun, gso.hdr_len);
1232 }
1233
1234 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1235 if (IS_ERR(skb)) {
1236 if (PTR_ERR(skb) != -EAGAIN)
1237 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1238 return PTR_ERR(skb);
1239 }
1240
1241 if (zerocopy)
1242 err = zerocopy_sg_from_iter(skb, from);
1243 else {
1244 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1245 if (!err && msg_control) {
1246 struct ubuf_info *uarg = msg_control;
1247 uarg->callback(uarg, false);
1248 }
1249 }
1250
1251 if (err) {
1252 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1253 kfree_skb(skb);
1254 return -EFAULT;
1255 }
1256
1257 err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
1258 if (err) {
1259 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1260 kfree_skb(skb);
1261 return -EINVAL;
1262 }
1263
1264 switch (tun->flags & TUN_TYPE_MASK) {
1265 case IFF_TUN:
1266 if (tun->flags & IFF_NO_PI) {
1267 switch (skb->data[0] & 0xf0) {
1268 case 0x40:
1269 pi.proto = htons(ETH_P_IP);
1270 break;
1271 case 0x60:
1272 pi.proto = htons(ETH_P_IPV6);
1273 break;
1274 default:
1275 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1276 kfree_skb(skb);
1277 return -EINVAL;
1278 }
1279 }
1280
1281 skb_reset_mac_header(skb);
1282 skb->protocol = pi.proto;
1283 skb->dev = tun->dev;
1284 break;
1285 case IFF_TAP:
1286 skb->protocol = eth_type_trans(skb, tun->dev);
1287 break;
1288 }
1289
1290 /* copy skb_ubuf_info for callback when skb has no error */
1291 if (zerocopy) {
1292 skb_shinfo(skb)->destructor_arg = msg_control;
1293 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1294 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1295 }
1296
1297 skb_reset_network_header(skb);
1298 skb_probe_transport_header(skb, 0);
1299
1300 rxhash = skb_get_hash(skb);
1301 netif_rx_ni(skb);
1302
1303 stats = get_cpu_ptr(tun->pcpu_stats);
1304 u64_stats_update_begin(&stats->syncp);
1305 stats->rx_packets++;
1306 stats->rx_bytes += len;
1307 u64_stats_update_end(&stats->syncp);
1308 put_cpu_ptr(stats);
1309
1310 tun_flow_update(tun, rxhash, tfile);
1311 return total_len;
1312 }
1313
1314 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
1315 {
1316 struct file *file = iocb->ki_filp;
1317 struct tun_struct *tun = tun_get(file);
1318 struct tun_file *tfile = file->private_data;
1319 ssize_t result;
1320
1321 if (!tun)
1322 return -EBADFD;
1323
1324 result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
1325
1326 tun_put(tun);
1327 return result;
1328 }
1329
1330 /* Put packet to the user space buffer */
1331 static ssize_t tun_put_user(struct tun_struct *tun,
1332 struct tun_file *tfile,
1333 struct sk_buff *skb,
1334 struct iov_iter *iter)
1335 {
1336 struct tun_pi pi = { 0, skb->protocol };
1337 struct tun_pcpu_stats *stats;
1338 ssize_t total;
1339 int vlan_offset = 0;
1340 int vlan_hlen = 0;
1341 int vnet_hdr_sz = 0;
1342
1343 if (skb_vlan_tag_present(skb))
1344 vlan_hlen = VLAN_HLEN;
1345
1346 if (tun->flags & IFF_VNET_HDR)
1347 vnet_hdr_sz = tun->vnet_hdr_sz;
1348
1349 total = skb->len + vlan_hlen + vnet_hdr_sz;
1350
1351 if (!(tun->flags & IFF_NO_PI)) {
1352 if (iov_iter_count(iter) < sizeof(pi))
1353 return -EINVAL;
1354
1355 total += sizeof(pi);
1356 if (iov_iter_count(iter) < total) {
1357 			/* Packet will be stripped */
1358 pi.flags |= TUN_PKT_STRIP;
1359 }
1360
1361 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
1362 return -EFAULT;
1363 }
1364
1365 if (vnet_hdr_sz) {
1366 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1367 int ret;
1368
1369 if (iov_iter_count(iter) < vnet_hdr_sz)
1370 return -EINVAL;
1371
1372 ret = virtio_net_hdr_from_skb(skb, &gso,
1373 tun_is_little_endian(tun));
1374 if (ret) {
1375 struct skb_shared_info *sinfo = skb_shinfo(skb);
1376 pr_err("unexpected GSO type: "
1377 "0x%x, gso_size %d, hdr_len %d\n",
1378 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1379 tun16_to_cpu(tun, gso.hdr_len));
1380 print_hex_dump(KERN_ERR, "tun: ",
1381 DUMP_PREFIX_NONE,
1382 16, 1, skb->head,
1383 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1384 WARN_ON_ONCE(1);
1385 return -EINVAL;
1386 }
1387
1388 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
1389 return -EFAULT;
1390
1391 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1392 }
1393
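	/* If the skb carries a VLAN tag, re-insert the 4-byte 802.1Q header
	 * into the user-visible byte stream between the MAC addresses and
	 * the ethertype. */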
1394 if (vlan_hlen) {
1395 int ret;
1396 struct {
1397 __be16 h_vlan_proto;
1398 __be16 h_vlan_TCI;
1399 } veth;
1400
1401 veth.h_vlan_proto = skb->vlan_proto;
1402 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
1403
1404 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1405
1406 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
1407 if (ret || !iov_iter_count(iter))
1408 goto done;
1409
1410 ret = copy_to_iter(&veth, sizeof(veth), iter);
1411 if (ret != sizeof(veth) || !iov_iter_count(iter))
1412 goto done;
1413 }
1414
1415 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
1416
1417 done:
1418 	/* caller is in process context */
1419 stats = get_cpu_ptr(tun->pcpu_stats);
1420 u64_stats_update_begin(&stats->syncp);
1421 stats->tx_packets++;
1422 stats->tx_bytes += skb->len + vlan_hlen;
1423 u64_stats_update_end(&stats->syncp);
1424 put_cpu_ptr(tun->pcpu_stats);
1425
1426 return total;
1427 }
1428
1429 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1430 struct iov_iter *to,
1431 int noblock)
1432 {
1433 struct sk_buff *skb;
1434 ssize_t ret;
1435 int peeked, err, off = 0;
1436
1437 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1438
1439 if (!iov_iter_count(to))
1440 return 0;
1441
1442 /* Read frames from queue */
1443 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1444 &peeked, &off, &err);
1445 if (!skb)
1446 return err;
1447
1448 ret = tun_put_user(tun, tfile, skb, to);
1449 if (unlikely(ret < 0))
1450 kfree_skb(skb);
1451 else
1452 consume_skb(skb);
1453
1454 return ret;
1455 }
1456
1457 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1458 {
1459 struct file *file = iocb->ki_filp;
1460 struct tun_file *tfile = file->private_data;
1461 struct tun_struct *tun = __tun_get(tfile);
1462 ssize_t len = iov_iter_count(to), ret;
1463
1464 if (!tun)
1465 return -EBADFD;
1466 ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
1467 ret = min_t(ssize_t, ret, len);
1468 if (ret > 0)
1469 iocb->ki_pos = ret;
1470 tun_put(tun);
1471 return ret;
1472 }
1473
1474 static void tun_free_netdev(struct net_device *dev)
1475 {
1476 struct tun_struct *tun = netdev_priv(dev);
1477
1478 BUG_ON(!(list_empty(&tun->disabled)));
1479 free_percpu(tun->pcpu_stats);
1480 tun_flow_uninit(tun);
1481 security_tun_dev_free_security(tun->security);
1482 free_netdev(dev);
1483 }
1484
1485 static void tun_setup(struct net_device *dev)
1486 {
1487 struct tun_struct *tun = netdev_priv(dev);
1488
1489 tun->owner = INVALID_UID;
1490 tun->group = INVALID_GID;
1491
1492 dev->ethtool_ops = &tun_ethtool_ops;
1493 dev->destructor = tun_free_netdev;
1494 /* We prefer our own queue length */
1495 dev->tx_queue_len = TUN_READQ_SIZE;
1496 }
1497
1498 /* Trivial set of netlink ops to allow deleting tun or tap
1499 * device with netlink.
1500 */
1501 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1502 {
1503 return -EINVAL;
1504 }
1505
1506 static struct rtnl_link_ops tun_link_ops __read_mostly = {
1507 .kind = DRV_NAME,
1508 .priv_size = sizeof(struct tun_struct),
1509 .setup = tun_setup,
1510 .validate = tun_validate,
1511 };
1512
1513 static void tun_sock_write_space(struct sock *sk)
1514 {
1515 struct tun_file *tfile;
1516 wait_queue_head_t *wqueue;
1517
1518 if (!sock_writeable(sk))
1519 return;
1520
1521 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1522 return;
1523
1524 wqueue = sk_sleep(sk);
1525 if (wqueue && waitqueue_active(wqueue))
1526 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1527 POLLWRNORM | POLLWRBAND);
1528
1529 tfile = container_of(sk, struct tun_file, sk);
1530 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1531 }
1532
1533 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1534 {
1535 int ret;
1536 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1537 struct tun_struct *tun = __tun_get(tfile);
1538
1539 if (!tun)
1540 return -EBADFD;
1541
1542 ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
1543 m->msg_flags & MSG_DONTWAIT);
1544 tun_put(tun);
1545 return ret;
1546 }
1547
1548 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
1549 int flags)
1550 {
1551 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1552 struct tun_struct *tun = __tun_get(tfile);
1553 int ret;
1554
1555 if (!tun)
1556 return -EBADFD;
1557
1558 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1559 ret = -EINVAL;
1560 goto out;
1561 }
1562 if (flags & MSG_ERRQUEUE) {
1563 ret = sock_recv_errqueue(sock->sk, m, total_len,
1564 SOL_PACKET, TUN_TX_TIMESTAMP);
1565 goto out;
1566 }
1567 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
1568 if (ret > (ssize_t)total_len) {
1569 m->msg_flags |= MSG_TRUNC;
1570 ret = flags & MSG_TRUNC ? ret : total_len;
1571 }
1572 out:
1573 tun_put(tun);
1574 return ret;
1575 }
1576
1577 /* Ops structure to mimic raw sockets with tun */
1578 static const struct proto_ops tun_socket_ops = {
1579 .sendmsg = tun_sendmsg,
1580 .recvmsg = tun_recvmsg,
1581 };
1582
1583 static struct proto tun_proto = {
1584 .name = "tun",
1585 .owner = THIS_MODULE,
1586 .obj_size = sizeof(struct tun_file),
1587 };
1588
1589 static int tun_flags(struct tun_struct *tun)
1590 {
1591 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
1592 }
1593
1594 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1595 char *buf)
1596 {
1597 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1598 return sprintf(buf, "0x%x\n", tun_flags(tun));
1599 }
1600
1601 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1605 return uid_valid(tun->owner)?
1606 sprintf(buf, "%u\n",
1607 from_kuid_munged(current_user_ns(), tun->owner)):
1608 sprintf(buf, "-1\n");
1609 }
1610
1611 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1612 char *buf)
1613 {
1614 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1615 return gid_valid(tun->group) ?
1616 sprintf(buf, "%u\n",
1617 from_kgid_munged(current_user_ns(), tun->group)):
1618 sprintf(buf, "-1\n");
1619 }
1620
1621 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1622 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1623 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1624
1625 static struct attribute *tun_dev_attrs[] = {
1626 &dev_attr_tun_flags.attr,
1627 &dev_attr_owner.attr,
1628 &dev_attr_group.attr,
1629 NULL
1630 };
1631
1632 static const struct attribute_group tun_attr_group = {
1633 .attrs = tun_dev_attrs
1634 };
1635
1636 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1637 {
1638 struct tun_struct *tun;
1639 struct tun_file *tfile = file->private_data;
1640 struct net_device *dev;
1641 int err;
1642
1643 if (tfile->detached)
1644 return -EINVAL;
1645
1646 dev = __dev_get_by_name(net, ifr->ifr_name);
1647 if (dev) {
1648 if (ifr->ifr_flags & IFF_TUN_EXCL)
1649 return -EBUSY;
1650 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1651 tun = netdev_priv(dev);
1652 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1653 tun = netdev_priv(dev);
1654 else
1655 return -EINVAL;
1656
1657 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1658 !!(tun->flags & IFF_MULTI_QUEUE))
1659 return -EINVAL;
1660
1661 if (tun_not_capable(tun))
1662 return -EPERM;
1663 err = security_tun_dev_open(tun->security);
1664 if (err < 0)
1665 return err;
1666
1667 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1668 if (err < 0)
1669 return err;
1670
1671 if (tun->flags & IFF_MULTI_QUEUE &&
1672 (tun->numqueues + tun->numdisabled > 1)) {
1673 			/* One or more queues have already been attached; no need
1674 * to initialize the device again.
1675 */
1676 return 0;
1677 }
1678 }
1679 else {
1680 char *name;
1681 unsigned long flags = 0;
1682 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1683 MAX_TAP_QUEUES : 1;
1684
1685 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1686 return -EPERM;
1687 err = security_tun_dev_create();
1688 if (err < 0)
1689 return err;
1690
1691 /* Set dev type */
1692 if (ifr->ifr_flags & IFF_TUN) {
1693 /* TUN device */
1694 flags |= IFF_TUN;
1695 name = "tun%d";
1696 } else if (ifr->ifr_flags & IFF_TAP) {
1697 /* TAP device */
1698 flags |= IFF_TAP;
1699 name = "tap%d";
1700 } else
1701 return -EINVAL;
1702
1703 if (*ifr->ifr_name)
1704 name = ifr->ifr_name;
1705
1706 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1707 NET_NAME_UNKNOWN, tun_setup, queues,
1708 queues);
1709
1710 if (!dev)
1711 return -ENOMEM;
1712
1713 dev_net_set(dev, net);
1714 dev->rtnl_link_ops = &tun_link_ops;
1715 dev->ifindex = tfile->ifindex;
1716 dev->sysfs_groups[0] = &tun_attr_group;
1717
1718 tun = netdev_priv(dev);
1719 tun->dev = dev;
1720 tun->flags = flags;
1721 tun->txflt.count = 0;
1722 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1723
1724 tun->align = NET_SKB_PAD;
1725 tun->filter_attached = false;
1726 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1727
1728 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
1729 if (!tun->pcpu_stats) {
1730 err = -ENOMEM;
1731 goto err_free_dev;
1732 }
1733
1734 spin_lock_init(&tun->lock);
1735
1736 err = security_tun_dev_alloc_security(&tun->security);
1737 if (err < 0)
1738 goto err_free_stat;
1739
1740 tun_net_init(dev);
1741 tun_flow_init(tun);
1742
1743 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1744 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1745 NETIF_F_HW_VLAN_STAG_TX;
1746 dev->features = dev->hw_features | NETIF_F_LLTX;
1747 dev->vlan_features = dev->features &
1748 ~(NETIF_F_HW_VLAN_CTAG_TX |
1749 NETIF_F_HW_VLAN_STAG_TX);
1750
1751 INIT_LIST_HEAD(&tun->disabled);
1752 err = tun_attach(tun, file, false);
1753 if (err < 0)
1754 goto err_free_flow;
1755
1756 err = register_netdevice(tun->dev);
1757 if (err < 0)
1758 goto err_detach;
1759 }
1760
1761 netif_carrier_on(tun->dev);
1762
1763 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1764
1765 tun->flags = (tun->flags & ~TUN_FEATURES) |
1766 (ifr->ifr_flags & TUN_FEATURES);
1767
1768 /* Make sure persistent devices do not get stuck in
1769 * xoff state.
1770 */
1771 if (netif_running(tun->dev))
1772 netif_tx_wake_all_queues(tun->dev);
1773
1774 strcpy(ifr->ifr_name, tun->dev->name);
1775 return 0;
1776
1777 err_detach:
1778 tun_detach_all(dev);
1779 err_free_flow:
1780 tun_flow_uninit(tun);
1781 security_tun_dev_free_security(tun->security);
1782 err_free_stat:
1783 free_percpu(tun->pcpu_stats);
1784 err_free_dev:
1785 free_netdev(dev);
1786 return err;
1787 }
1788
1789 static void tun_get_iff(struct net *net, struct tun_struct *tun,
1790 struct ifreq *ifr)
1791 {
1792 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1793
1794 strcpy(ifr->ifr_name, tun->dev->name);
1795
1796 ifr->ifr_flags = tun_flags(tun);
1797
1798 }
1799
1800 /* This is like a cut-down ethtool ops, except done via tun fd so no
1801 * privs required. */
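/* For example, a user could enable checksum and TSO offloads with an
 * illustrative call such as:
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 * (fd being the tun fd); see set_offload() below. */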
1802 static int set_offload(struct tun_struct *tun, unsigned long arg)
1803 {
1804 netdev_features_t features = 0;
1805
1806 if (arg & TUN_F_CSUM) {
1807 features |= NETIF_F_HW_CSUM;
1808 arg &= ~TUN_F_CSUM;
1809
1810 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1811 if (arg & TUN_F_TSO_ECN) {
1812 features |= NETIF_F_TSO_ECN;
1813 arg &= ~TUN_F_TSO_ECN;
1814 }
1815 if (arg & TUN_F_TSO4)
1816 features |= NETIF_F_TSO;
1817 if (arg & TUN_F_TSO6)
1818 features |= NETIF_F_TSO6;
1819 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1820 }
1821
1822 if (arg & TUN_F_UFO) {
1823 features |= NETIF_F_UFO;
1824 arg &= ~TUN_F_UFO;
1825 }
1826 }
1827
1828 	/* This gives the user a way to test for new features in the future by
1829 * trying to set them. */
1830 if (arg)
1831 return -EINVAL;
1832
1833 tun->set_features = features;
1834 netdev_update_features(tun->dev);
1835
1836 return 0;
1837 }
1838
1839 static void tun_detach_filter(struct tun_struct *tun, int n)
1840 {
1841 int i;
1842 struct tun_file *tfile;
1843
1844 for (i = 0; i < n; i++) {
1845 tfile = rtnl_dereference(tun->tfiles[i]);
1846 lock_sock(tfile->socket.sk);
1847 sk_detach_filter(tfile->socket.sk);
1848 release_sock(tfile->socket.sk);
1849 }
1850
1851 tun->filter_attached = false;
1852 }
1853
1854 static int tun_attach_filter(struct tun_struct *tun)
1855 {
1856 int i, ret = 0;
1857 struct tun_file *tfile;
1858
1859 for (i = 0; i < tun->numqueues; i++) {
1860 tfile = rtnl_dereference(tun->tfiles[i]);
1861 lock_sock(tfile->socket.sk);
1862 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1863 release_sock(tfile->socket.sk);
1864 if (ret) {
1865 tun_detach_filter(tun, i);
1866 return ret;
1867 }
1868 }
1869
1870 tun->filter_attached = true;
1871 return ret;
1872 }
1873
1874 static void tun_set_sndbuf(struct tun_struct *tun)
1875 {
1876 struct tun_file *tfile;
1877 int i;
1878
1879 for (i = 0; i < tun->numqueues; i++) {
1880 tfile = rtnl_dereference(tun->tfiles[i]);
1881 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1882 }
1883 }
1884
1885 static int tun_set_queue(struct file *file, struct ifreq *ifr)
1886 {
1887 struct tun_file *tfile = file->private_data;
1888 struct tun_struct *tun;
1889 int ret = 0;
1890
1891 rtnl_lock();
1892
1893 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1894 tun = tfile->detached;
1895 if (!tun) {
1896 ret = -EINVAL;
1897 goto unlock;
1898 }
1899 ret = security_tun_dev_attach_queue(tun->security);
1900 if (ret < 0)
1901 goto unlock;
1902 ret = tun_attach(tun, file, false);
1903 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1904 tun = rtnl_dereference(tfile->tun);
1905 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1906 ret = -EINVAL;
1907 else
1908 __tun_detach(tfile, false);
1909 } else
1910 ret = -EINVAL;
1911
1912 unlock:
1913 rtnl_unlock();
1914 return ret;
1915 }
1916
1917 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1918 unsigned long arg, int ifreq_len)
1919 {
1920 struct tun_file *tfile = file->private_data;
1921 struct tun_struct *tun;
1922 void __user* argp = (void __user*)arg;
1923 struct ifreq ifr;
1924 kuid_t owner;
1925 kgid_t group;
1926 int sndbuf;
1927 int vnet_hdr_sz;
1928 unsigned int ifindex;
1929 int le;
1930 int ret;
1931
1932 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1933 if (copy_from_user(&ifr, argp, ifreq_len))
1934 return -EFAULT;
1935 } else {
1936 memset(&ifr, 0, sizeof(ifr));
1937 }
1938 if (cmd == TUNGETFEATURES) {
1939 /* Currently this just means: "what IFF flags are valid?".
1940 * This is needed because we never checked for invalid flags on
1941 * TUNSETIFF.
1942 */
1943 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
1944 (unsigned int __user*)argp);
1945 } else if (cmd == TUNSETQUEUE)
1946 return tun_set_queue(file, &ifr);
1947
1948 ret = 0;
1949 rtnl_lock();
1950
1951 tun = __tun_get(tfile);
1952 if (cmd == TUNSETIFF && !tun) {
1953 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1954
1955 ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
1956
1957 if (ret)
1958 goto unlock;
1959
1960 if (copy_to_user(argp, &ifr, ifreq_len))
1961 ret = -EFAULT;
1962 goto unlock;
1963 }
1964 if (cmd == TUNSETIFINDEX) {
1965 ret = -EPERM;
1966 if (tun)
1967 goto unlock;
1968
1969 ret = -EFAULT;
1970 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
1971 goto unlock;
1972
1973 ret = 0;
1974 tfile->ifindex = ifindex;
1975 goto unlock;
1976 }
1977
1978 ret = -EBADFD;
1979 if (!tun)
1980 goto unlock;
1981
1982 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1983
1984 ret = 0;
1985 switch (cmd) {
1986 case TUNGETIFF:
1987 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1988
1989 if (tfile->detached)
1990 ifr.ifr_flags |= IFF_DETACH_QUEUE;
1991 if (!tfile->socket.sk->sk_filter)
1992 ifr.ifr_flags |= IFF_NOFILTER;
1993
1994 if (copy_to_user(argp, &ifr, ifreq_len))
1995 ret = -EFAULT;
1996 break;
1997
1998 case TUNSETNOCSUM:
1999 /* Disable/Enable checksum */
2000
2001 /* [unimplemented] */
2002 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
2003 arg ? "disabled" : "enabled");
2004 break;
2005
2006 case TUNSETPERSIST:
2007 /* Disable/Enable persist mode. Keep an extra reference to the
2008 		 * module to prevent the module from being unloaded.
2009 */
2010 if (arg && !(tun->flags & IFF_PERSIST)) {
2011 tun->flags |= IFF_PERSIST;
2012 __module_get(THIS_MODULE);
2013 }
2014 if (!arg && (tun->flags & IFF_PERSIST)) {
2015 tun->flags &= ~IFF_PERSIST;
2016 module_put(THIS_MODULE);
2017 }
2018
2019 tun_debug(KERN_INFO, tun, "persist %s\n",
2020 arg ? "enabled" : "disabled");
2021 break;
2022
2023 case TUNSETOWNER:
2024 /* Set owner of the device */
2025 owner = make_kuid(current_user_ns(), arg);
2026 if (!uid_valid(owner)) {
2027 ret = -EINVAL;
2028 break;
2029 }
2030 tun->owner = owner;
2031 tun_debug(KERN_INFO, tun, "owner set to %u\n",
2032 from_kuid(&init_user_ns, tun->owner));
2033 break;
2034
2035 case TUNSETGROUP:
2036 /* Set group of the device */
2037 group = make_kgid(current_user_ns(), arg);
2038 if (!gid_valid(group)) {
2039 ret = -EINVAL;
2040 break;
2041 }
2042 tun->group = group;
2043 tun_debug(KERN_INFO, tun, "group set to %u\n",
2044 from_kgid(&init_user_ns, tun->group));
2045 break;
2046
2047 case TUNSETLINK:
2048 /* Only allow setting the type when the interface is down */
2049 if (tun->dev->flags & IFF_UP) {
2050 tun_debug(KERN_INFO, tun,
2051 "Linktype set failed because interface is up\n");
2052 ret = -EBUSY;
2053 } else {
2054 tun->dev->type = (int) arg;
2055 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
2056 tun->dev->type);
2057 ret = 0;
2058 }
2059 break;
2060
2061 #ifdef TUN_DEBUG
2062 case TUNSETDEBUG:
2063 tun->debug = arg;
2064 break;
2065 #endif
2066 case TUNSETOFFLOAD:
2067 ret = set_offload(tun, arg);
2068 break;
2069
2070 case TUNSETTXFILTER:
2071 /* Can be set only for TAPs */
2072 ret = -EINVAL;
2073 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2074 break;
2075 ret = update_filter(&tun->txflt, (void __user *)arg);
2076 break;
2077
2078 case SIOCGIFHWADDR:
2079 /* Get hw address */
2080 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2081 ifr.ifr_hwaddr.sa_family = tun->dev->type;
2082 if (copy_to_user(argp, &ifr, ifreq_len))
2083 ret = -EFAULT;
2084 break;
2085
2086 case SIOCSIFHWADDR:
2087 /* Set hw address */
2088 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
2089 ifr.ifr_hwaddr.sa_data);
2090
2091 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2092 break;
2093
2094 case TUNGETSNDBUF:
2095 sndbuf = tfile->socket.sk->sk_sndbuf;
2096 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2097 ret = -EFAULT;
2098 break;
2099
2100 case TUNSETSNDBUF:
2101 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2102 ret = -EFAULT;
2103 break;
2104 }
2105
2106 tun->sndbuf = sndbuf;
2107 tun_set_sndbuf(tun);
2108 break;
2109
2110 case TUNGETVNETHDRSZ:
2111 vnet_hdr_sz = tun->vnet_hdr_sz;
2112 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2113 ret = -EFAULT;
2114 break;
2115
2116 case TUNSETVNETHDRSZ:
2117 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2118 ret = -EFAULT;
2119 break;
2120 }
2121 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2122 ret = -EINVAL;
2123 break;
2124 }
2125
2126 tun->vnet_hdr_sz = vnet_hdr_sz;
2127 break;
2128
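/* Note on the bound above: sizeof(struct virtio_net_hdr) is 10 bytes;
 * userspace such as QEMU typically sets 12 here to use the
 * mergeable-rx-buffer variant. A sketch:
 *
 *	int hdrsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 */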
2129 case TUNGETVNETLE:
2130 le = !!(tun->flags & TUN_VNET_LE);
2131 if (put_user(le, (int __user *)argp))
2132 ret = -EFAULT;
2133 break;
2134
2135 case TUNSETVNETLE:
2136 if (get_user(le, (int __user *)argp)) {
2137 ret = -EFAULT;
2138 break;
2139 }
2140 if (le)
2141 tun->flags |= TUN_VNET_LE;
2142 else
2143 tun->flags &= ~TUN_VNET_LE;
2144 break;
2145
2146 case TUNGETVNETBE:
2147 ret = tun_get_vnet_be(tun, argp);
2148 break;
2149
2150 case TUNSETVNETBE:
2151 ret = tun_set_vnet_be(tun, argp);
2152 break;
2153
2154 case TUNATTACHFILTER:
2155 /* Can be set only for TAPs */
2156 ret = -EINVAL;
2157 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2158 break;
2159 ret = -EFAULT;
2160 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2161 break;
2162
2163 ret = tun_attach_filter(tun);
2164 break;
2165
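/* Example: TUNATTACHFILTER takes a classic-BPF program and applies it
 * to packets queued toward this fd. A hedged userspace sketch of an
 * accept-everything filter:
 *
 *	struct sock_filter ins = BPF_STMT(BPF_RET | BPF_K, ~0U);
 *	struct sock_fprog prog = { .len = 1, .filter = &ins };
 *	ioctl(fd, TUNATTACHFILTER, &prog);
 */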
2166 case TUNDETACHFILTER:
2167 /* Can be set only for TAPs */
2168 ret = -EINVAL;
2169 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2170 break;
2171 ret = 0;
2172 tun_detach_filter(tun, tun->numqueues);
2173 break;
2174
2175 case TUNGETFILTER:
2176 ret = -EINVAL;
2177 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2178 break;
2179 ret = -EFAULT;
2180 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2181 break;
2182 ret = 0;
2183 break;
2184
2185 default:
2186 ret = -EINVAL;
2187 break;
2188 }
2189
2190 unlock:
2191 rtnl_unlock();
2192 if (tun)
2193 tun_put(tun);
2194 return ret;
2195 }
2196
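/* Example: the canonical userspace sequence driving the ioctl path
 * above (see also Documentation/networking/tuntap.txt). A minimal
 * sketch with error handling trimmed:
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ - 1);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	// ifr.ifr_name now holds the allocated name, e.g. "tun0"
 *
 * Once TUNSETIFF succeeds, read()/write() on fd carry raw IP packets
 * (or Ethernet frames with IFF_TAP).
 */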
2197 static long tun_chr_ioctl(struct file *file,
2198 unsigned int cmd, unsigned long arg)
2199 {
2200 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
2201 }
2202
2203 #ifdef CONFIG_COMPAT
2204 static long tun_chr_compat_ioctl(struct file *file,
2205 unsigned int cmd, unsigned long arg)
2206 {
2207 switch (cmd) {
2208 case TUNSETIFF:
2209 case TUNGETIFF:
2210 case TUNSETTXFILTER:
2211 case TUNGETSNDBUF:
2212 case TUNSETSNDBUF:
2213 case SIOCGIFHWADDR:
2214 case SIOCSIFHWADDR:
2215 arg = (unsigned long)compat_ptr(arg);
2216 break;
2217 default:
2218 arg = (compat_ulong_t)arg;
2219 break;
2220 }
2221
2222 /*
2223 * compat_ifreq is shorter than ifreq, so we must not access beyond
2224 * the end of that structure. All fields that are used in this
2225 * driver are compatible, though, so we don't need to convert the
2226 * contents.
2227 */
2228 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2229 }
2230 #endif /* CONFIG_COMPAT */
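/* Size note for the comment above: on x86-64, for instance, struct ifreq
 * is 40 bytes while struct compat_ifreq (a 32-bit task's layout) is 32,
 * so passing sizeof(struct compat_ifreq) as ifreq_len keeps the
 * copy_from_user()/copy_to_user() calls within the caller's smaller
 * object. The fields this driver touches (ifr_name, ifr_flags,
 * ifr_hwaddr) sit at identical offsets in both layouts.
 */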
2231
2232 static int tun_chr_fasync(int fd, struct file *file, int on)
2233 {
2234 struct tun_file *tfile = file->private_data;
2235 int ret;
2236
2237 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2238 goto out;
2239
2240 if (on) {
2241 __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2242 tfile->flags |= TUN_FASYNC;
2243 } else
2244 tfile->flags &= ~TUN_FASYNC;
2245 ret = 0;
2246 out:
2247 return ret;
2248 }
2249
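/* Example: userspace opts in to the SIGIO path above with the usual
 * fcntl() sequence; a sketch, assuming fd is an attached tun fd:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * The VFS then calls tun_chr_fasync(), and the process receives SIGIO
 * when packets become readable.
 */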
2250 static int tun_chr_open(struct inode *inode, struct file *file)
2251 {
2252 struct net *net = current->nsproxy->net_ns;
2253 struct tun_file *tfile;
2254
2255 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2256
2257 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
2258 &tun_proto, 0);
2259 if (!tfile)
2260 return -ENOMEM;
2261 RCU_INIT_POINTER(tfile->tun, NULL);
2262 tfile->flags = 0;
2263 tfile->ifindex = 0;
2264
2265 init_waitqueue_head(&tfile->wq.wait);
2266 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
2267
2268 tfile->socket.file = file;
2269 tfile->socket.ops = &tun_socket_ops;
2270
2271 sock_init_data(&tfile->socket, &tfile->sk);
2272
2273 tfile->sk.sk_write_space = tun_sock_write_space;
2274 tfile->sk.sk_sndbuf = INT_MAX;
2275
2276 file->private_data = tfile;
2277 INIT_LIST_HEAD(&tfile->next);
2278
2279 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2280
2281 return 0;
2282 }
2283
2284 static int tun_chr_close(struct inode *inode, struct file *file)
2285 {
2286 struct tun_file *tfile = file->private_data;
2287
2288 tun_detach(tfile, true);
2289
2290 return 0;
2291 }
2292
2293 #ifdef CONFIG_PROC_FS
2294 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2295 {
2296 struct tun_struct *tun;
2297 struct ifreq ifr;
2298
2299 memset(&ifr, 0, sizeof(ifr));
2300
2301 rtnl_lock();
2302 tun = tun_get(f);
2303 if (tun)
2304 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2305 rtnl_unlock();
2306
2307 if (tun)
2308 tun_put(tun);
2309
2310 seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2311 }
2312 #endif
2313
2314 static const struct file_operations tun_fops = {
2315 .owner = THIS_MODULE,
2316 .llseek = no_llseek,
2317 .read_iter = tun_chr_read_iter,
2318 .write_iter = tun_chr_write_iter,
2319 .poll = tun_chr_poll,
2320 .unlocked_ioctl = tun_chr_ioctl,
2321 #ifdef CONFIG_COMPAT
2322 .compat_ioctl = tun_chr_compat_ioctl,
2323 #endif
2324 .open = tun_chr_open,
2325 .release = tun_chr_close,
2326 .fasync = tun_chr_fasync,
2327 #ifdef CONFIG_PROC_FS
2328 .show_fdinfo = tun_chr_show_fdinfo,
2329 #endif
2330 };
2331
2332 static struct miscdevice tun_miscdev = {
2333 .minor = TUN_MINOR,
2334 .name = "tun",
2335 .nodename = "net/tun",
2336 .fops = &tun_fops,
2337 };
2338
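/* A note on the registration above: misc devices share character major
 * 10, and TUN_MINOR (200, from <linux/miscdevice.h>) plus the "net/tun"
 * nodename yield the well-known /dev/net/tun node. At the bottom of this
 * file, MODULE_ALIAS_MISCDEV(TUN_MINOR) lets an open() of that node
 * auto-load this module, and MODULE_ALIAS("devname:net/tun") lets the
 * node itself be created before the module is loaded.
 */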
2339 /* ethtool interface */
2340
2341 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2342 {
2343 cmd->supported = 0;
2344 cmd->advertising = 0;
2345 ethtool_cmd_speed_set(cmd, SPEED_10);
2346 cmd->duplex = DUPLEX_FULL;
2347 cmd->port = PORT_TP;
2348 cmd->phy_address = 0;
2349 cmd->transceiver = XCVR_INTERNAL;
2350 cmd->autoneg = AUTONEG_DISABLE;
2351 cmd->maxtxpkt = 0;
2352 cmd->maxrxpkt = 0;
2353 return 0;
2354 }
2355
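/* The values above are synthetic: there is no PHY behind a tun/tap
 * device, so `ethtool tun0` simply echoes them back, e.g.
 *
 *	Speed: 10Mb/s
 *	Duplex: Full
 *	Port: Twisted Pair
 *	Auto-negotiation: off
 *
 * (illustrative output; exact formatting depends on the ethtool build).
 */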
2356 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2357 {
2358 struct tun_struct *tun = netdev_priv(dev);
2359
2360 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2361 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2362
2363 switch (tun->flags & TUN_TYPE_MASK) {
2364 case IFF_TUN:
2365 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2366 break;
2367 case IFF_TAP:
2368 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2369 break;
2370 }
2371 }
2372
2373 static u32 tun_get_msglevel(struct net_device *dev)
2374 {
2375 #ifdef TUN_DEBUG
2376 struct tun_struct *tun = netdev_priv(dev);
2377 return tun->debug;
2378 #else
2379 return -EOPNOTSUPP;
2380 #endif
2381 }
2382
2383 static void tun_set_msglevel(struct net_device *dev, u32 value)
2384 {
2385 #ifdef TUN_DEBUG
2386 struct tun_struct *tun = netdev_priv(dev);
2387 tun->debug = value;
2388 #endif
2389 }
2390
2391 static const struct ethtool_ops tun_ethtool_ops = {
2392 .get_settings = tun_get_settings,
2393 .get_drvinfo = tun_get_drvinfo,
2394 .get_msglevel = tun_get_msglevel,
2395 .set_msglevel = tun_set_msglevel,
2396 .get_link = ethtool_op_get_link,
2397 .get_ts_info = ethtool_op_get_ts_info,
2398 };
2399
2400
2401 static int __init tun_init(void)
2402 {
2403 int ret = 0;
2404
2405 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2406 pr_info("%s\n", DRV_COPYRIGHT);
2407
2408 ret = rtnl_link_register(&tun_link_ops);
2409 if (ret) {
2410 pr_err("Can't register link_ops\n");
2411 goto err_linkops;
2412 }
2413
2414 ret = misc_register(&tun_miscdev);
2415 if (ret) {
2416 pr_err("Can't register misc device %d\n", TUN_MINOR);
2417 goto err_misc;
2418 }
2419 return 0;
2420 err_misc:
2421 rtnl_link_unregister(&tun_link_ops);
2422 err_linkops:
2423 return ret;
2424 }
2425
2426 static void tun_cleanup(void)
2427 {
2428 misc_deregister(&tun_miscdev);
2429 rtnl_link_unregister(&tun_link_ops);
2430 }
2431
2432 /* Get an underlying socket object from tun file. Returns error unless file is
2433 * attached to a device. The returned object works like a packet socket, it
2434 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
2435 * holding a reference to the file for as long as the socket is in use. */
2436 struct socket *tun_get_socket(struct file *file)
2437 {
2438 struct tun_file *tfile;
2439 if (file->f_op != &tun_fops)
2440 return ERR_PTR(-EINVAL);
2441 tfile = file->private_data;
2442 if (!tfile)
2443 return ERR_PTR(-EBADFD);
2444 return &tfile->socket;
2445 }
2446 EXPORT_SYMBOL_GPL(tun_get_socket);
2447
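/* A hedged sketch of the in-kernel consumer side (vhost-net is the main
 * user); the caller must already hold a reference on @file:
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	// sock is now usable with sock_sendmsg()/sock_recvmsg(), and
 *	// stays valid for as long as the file reference is held.
 */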
2448 module_init(tun_init);
2449 module_exit(tun_cleanup);
2450 MODULE_DESCRIPTION(DRV_DESCRIPTION);
2451 MODULE_AUTHOR(DRV_COPYRIGHT);
2452 MODULE_LICENSE("GPL");
2453 MODULE_ALIAS_MISCDEV(TUN_MINOR);
2454 MODULE_ALIAS("devname:net/tun");