1 /*
2 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of
7 * the License, or (at your option) any later version.
8 *
9 * The code this is based on carried the following copyright notice:
10 * ---
11 * (C) Copyright 2001-2006
12 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
13 * Re-worked by Ben Greear <greearb@candelatech.com>
14 * ---
15 */
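/*
 * Overview: a macvlan multiplexes one lower Ethernet device by MAC
 * address.  Frames arriving on the lower device are steered to the
 * macvlan owning the destination address, and frames sent on a macvlan
 * go out through the lower device.  As a rough usage sketch (iproute2
 * syntax, assuming a lower device named eth0):
 *
 *	ip link add link eth0 name macvlan0 type macvlan mode bridge
 *	ip link set macvlan0 up
 *
 * The modes (private, vepa, bridge, passthru) only differ in how
 * traffic between macvlans sharing the same lower device is forwarded;
 * see macvlan_handle_frame() and macvlan_queue_xmit().
 */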
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/rculist.h>
24 #include <linux/notifier.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_vlan.h>
30 #include <linux/if_link.h>
31 #include <linux/if_macvlan.h>
32 #include <linux/hash.h>
33 #include <net/rtnetlink.h>
34 #include <net/xfrm.h>
35
36 #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
37
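/*
 * One macvlan_port hangs off each lower device carrying macvlans; it is
 * stored as the lower device's rx_handler_data.  vlan_hash is keyed on
 * the last byte of each macvlan's MAC address, so unicast RX lookup is
 * a short hlist walk in macvlan_hash_lookup().
 */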
38 struct macvlan_port {
39 struct net_device *dev;
40 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
41 struct list_head vlans;
42 struct rcu_head rcu;
43 bool passthru;
44 int count;
45 };
46
47 static void macvlan_port_destroy(struct net_device *dev);
48
49 static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
50 {
51 return rcu_dereference(dev->rx_handler_data);
52 }
53
54 static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
55 {
56 return rtnl_dereference(dev->rx_handler_data);
57 }
58
59 #define macvlan_port_exists(dev) ((dev)->priv_flags & IFF_MACVLAN_PORT)
60
61 static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
62 const unsigned char *addr)
63 {
64 struct macvlan_dev *vlan;
65
66 hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
67 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
68 return vlan;
69 }
70 return NULL;
71 }
72
73 static void macvlan_hash_add(struct macvlan_dev *vlan)
74 {
75 struct macvlan_port *port = vlan->port;
76 const unsigned char *addr = vlan->dev->dev_addr;
77
78 hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]);
79 }
80
81 static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
82 {
83 hlist_del_rcu(&vlan->hlist);
84 if (sync)
85 synchronize_rcu();
86 }
87
88 static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
89 const unsigned char *addr)
90 {
91 macvlan_hash_del(vlan, true);
92 /* Now that we are unhashed, it is safe to change the device
93 * address without confusing packet delivery.
94 */
95 memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
96 macvlan_hash_add(vlan);
97 }
98
99 static int macvlan_addr_busy(const struct macvlan_port *port,
100 const unsigned char *addr)
101 {
102 /* Test to see if the specified address is
103 * currently in use by the underlying device or
104 * another macvlan.
105 */
106 if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
107 return 1;
108
109 if (macvlan_hash_lookup(port, addr))
110 return 1;
111
112 return 0;
113 }
114
115
116 static int macvlan_broadcast_one(struct sk_buff *skb,
117 const struct macvlan_dev *vlan,
118 const struct ethhdr *eth, bool local)
119 {
120 struct net_device *dev = vlan->dev;
121
122 if (local)
123 return vlan->forward(dev, skb);
124
125 skb->dev = dev;
126 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
127 skb->pkt_type = PACKET_BROADCAST;
128 else
129 skb->pkt_type = PACKET_MULTICAST;
130
131 return vlan->receive(skb);
132 }
133
134 static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
135 {
136 return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
137 }
138
139
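/*
 * Multicast filtering: every macvlan keeps a MACVLAN_MC_FILTER_SZ-bit
 * bitmap (mc_filter) as an approximate membership test.  mc_hash()
 * hashes the last four bytes of the group address and mixes in the
 * vlan pointer, so the same group maps to different bits on different
 * macvlans.
 */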
140 static unsigned int mc_hash(const struct macvlan_dev *vlan,
141 const unsigned char *addr)
142 {
143 u32 val = __get_unaligned_cpu32(addr + 2);
144
145 val ^= macvlan_hash_mix(vlan);
146 return hash_32(val, MACVLAN_MC_FILTER_BITS);
147 }
148
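/*
 * Deliver a copy of a multicast/broadcast frame to every macvlan on
 * @port whose mode is included in @mode, skipping @src and any macvlan
 * whose mc_filter bit for this destination is clear.  PAUSE frames are
 * never forwarded.
 */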
149 static void macvlan_broadcast(struct sk_buff *skb,
150 const struct macvlan_port *port,
151 struct net_device *src,
152 enum macvlan_mode mode)
153 {
154 const struct ethhdr *eth = eth_hdr(skb);
155 const struct macvlan_dev *vlan;
156 struct sk_buff *nskb;
157 unsigned int i;
158 int err;
159 unsigned int hash;
160
161 if (skb->protocol == htons(ETH_P_PAUSE))
162 return;
163
164 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
165 hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
166 if (vlan->dev == src || !(vlan->mode & mode))
167 continue;
168
169 hash = mc_hash(vlan, eth->h_dest);
170 if (!test_bit(hash, vlan->mc_filter))
171 continue;
172
173 err = NET_RX_DROP;
174 nskb = skb_clone(skb, GFP_ATOMIC);
175 if (likely(nskb))
176 err = macvlan_broadcast_one(
177 nskb, vlan, eth,
178 mode == MACVLAN_MODE_BRIDGE);
179 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
180 err == NET_RX_SUCCESS, 1);
181 }
182 }
183 }
184
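/*
 * RX entry point, registered as the lower device's rx_handler.
 * Multicast frames are flooded according to the mode of the macvlan
 * matching the source address (frames from external sources go to
 * every mode); unicast frames are steered to the macvlan owning the
 * destination address, or to the first macvlan on a passthru port.
 */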
185 /* called under rcu_read_lock() from netif_receive_skb */
186 static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
187 {
188 struct macvlan_port *port;
189 struct sk_buff *skb = *pskb;
190 const struct ethhdr *eth = eth_hdr(skb);
191 const struct macvlan_dev *vlan;
192 const struct macvlan_dev *src;
193 struct net_device *dev;
194 unsigned int len = 0;
195 int ret = NET_RX_DROP;
196
197 port = macvlan_port_get_rcu(skb->dev);
198 if (is_multicast_ether_addr(eth->h_dest)) {
199 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
200 if (!skb)
201 return RX_HANDLER_CONSUMED;
202 eth = eth_hdr(skb);
203 src = macvlan_hash_lookup(port, eth->h_source);
204 if (!src)
205 /* frame comes from an external address */
206 macvlan_broadcast(skb, port, NULL,
207 MACVLAN_MODE_PRIVATE |
208 MACVLAN_MODE_VEPA |
209 MACVLAN_MODE_PASSTHRU |
210 MACVLAN_MODE_BRIDGE);
211 else if (src->mode == MACVLAN_MODE_VEPA)
212 /* flood to everyone except source */
213 macvlan_broadcast(skb, port, src->dev,
214 MACVLAN_MODE_VEPA |
215 MACVLAN_MODE_BRIDGE);
216 else if (src->mode == MACVLAN_MODE_BRIDGE)
217 /*
218 * flood only to VEPA ports, bridge ports
219 * already saw the frame on the way out.
220 */
221 macvlan_broadcast(skb, port, src->dev,
222 MACVLAN_MODE_VEPA);
223 else {
224 /* forward to original port. */
225 vlan = src;
226 ret = macvlan_broadcast_one(skb, vlan, eth, 0);
227 goto out;
228 }
229
230 return RX_HANDLER_PASS;
231 }
232
233 if (port->passthru)
234 vlan = list_first_or_null_rcu(&port->vlans,
235 struct macvlan_dev, list);
236 else
237 vlan = macvlan_hash_lookup(port, eth->h_dest);
238 if (vlan == NULL)
239 return RX_HANDLER_PASS;
240
241 dev = vlan->dev;
242 if (unlikely(!(dev->flags & IFF_UP))) {
243 kfree_skb(skb);
244 return RX_HANDLER_CONSUMED;
245 }
246 len = skb->len + ETH_HLEN;
247 skb = skb_share_check(skb, GFP_ATOMIC);
248 if (!skb)
249 goto out;
250
251 skb->dev = dev;
252 skb->pkt_type = PACKET_HOST;
253
254 ret = vlan->receive(skb);
255
256 out:
257 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
258 return RX_HANDLER_CONSUMED;
259 }
260
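/*
 * Software TX path, called from macvlan_start_xmit() when no hardware
 * forwarding station is in use.  In bridge mode a frame destined to
 * another bridge-mode macvlan on the same port is looped back through
 * the lower device with dev_forward_skb() (so its taps see it) instead
 * of hitting the wire, and multicasts are flooded to the other bridge
 * ports as well; everything else is simply queued on the lower device.
 */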
261 static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
262 {
263 const struct macvlan_dev *vlan = netdev_priv(dev);
264 const struct macvlan_port *port = vlan->port;
265 const struct macvlan_dev *dest;
266 __u8 ip_summed = skb->ip_summed;
267
268 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
269 const struct ethhdr *eth = (void *)skb->data;
270 skb->ip_summed = CHECKSUM_UNNECESSARY;
271
272 /* send to other bridge ports directly */
273 if (is_multicast_ether_addr(eth->h_dest)) {
274 macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
275 goto xmit_world;
276 }
277
278 dest = macvlan_hash_lookup(port, eth->h_dest);
279 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
280 /* send to lowerdev first for its network taps */
281 dev_forward_skb(vlan->lowerdev, skb);
282
283 return NET_XMIT_SUCCESS;
284 }
285 }
286
287 xmit_world:
288 skb->ip_summed = ip_summed;
289 skb->dev = vlan->lowerdev;
290 return dev_queue_xmit(skb);
291 }
292
293 netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
294 struct net_device *dev)
295 {
296 unsigned int len = skb->len;
297 int ret;
298 const struct macvlan_dev *vlan = netdev_priv(dev);
299
300 if (vlan->fwd_priv) {
301 skb->dev = vlan->lowerdev;
302 ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
303 } else {
304 ret = macvlan_queue_xmit(skb, dev);
305 }
306
307 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
308 struct macvlan_pcpu_stats *pcpu_stats;
309
310 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
311 u64_stats_update_begin(&pcpu_stats->syncp);
312 pcpu_stats->tx_packets++;
313 pcpu_stats->tx_bytes += len;
314 u64_stats_update_end(&pcpu_stats->syncp);
315 } else {
316 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
317 }
318 return ret;
319 }
320 EXPORT_SYMBOL_GPL(macvlan_start_xmit);
321
322 static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
323 unsigned short type, const void *daddr,
324 const void *saddr, unsigned len)
325 {
326 const struct macvlan_dev *vlan = netdev_priv(dev);
327 struct net_device *lowerdev = vlan->lowerdev;
328
329 return dev_hard_header(skb, lowerdev, type, daddr,
330 saddr ? : dev->dev_addr, len);
331 }
332
333 static const struct header_ops macvlan_hard_header_ops = {
334 .create = macvlan_hard_header,
335 .rebuild = eth_rebuild_header,
336 .parse = eth_header_parse,
337 .cache = eth_header_cache,
338 .cache_update = eth_header_cache_update,
339 };
340
341 static struct rtnl_link_ops macvlan_link_ops;
342
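/*
 * Bring a macvlan up.  In passthru mode the lower device is just made
 * promiscuous (unless NOPROMISC is set); otherwise the macvlan's MAC is
 * added to the lower device's unicast filter, after checking that no
 * other macvlan or the lower device itself already uses it, and the
 * device is hashed so the RX handler can find it.
 */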
343 static int macvlan_open(struct net_device *dev)
344 {
345 struct macvlan_dev *vlan = netdev_priv(dev);
346 struct net_device *lowerdev = vlan->lowerdev;
347 int err;
348
349 if (vlan->port->passthru) {
350 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
351 err = dev_set_promiscuity(lowerdev, 1);
352 if (err < 0)
353 goto out;
354 }
355 goto hash_add;
356 }
357
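/* If the lower device advertises NETIF_F_HW_L2FW_DOFFLOAD, ask its
 * driver for a forwarding station via ndo_dfwd_add_station(); TX will
 * then bypass the software path through dev_queue_xmit_accel().
 */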
358 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
359 dev->rtnl_link_ops == &macvlan_link_ops) {
360 vlan->fwd_priv =
361 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
362
363 /* If we get a NULL pointer back, or if we get an error,
364 * then we should just fall through to the non-accelerated path.
365 */
366 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
367 vlan->fwd_priv = NULL;
368 } else
369 return 0;
370 }
371
372 err = -EBUSY;
373 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
374 goto out;
375
376 err = dev_uc_add(lowerdev, dev->dev_addr);
377 if (err < 0)
378 goto out;
379 if (dev->flags & IFF_ALLMULTI) {
380 err = dev_set_allmulti(lowerdev, 1);
381 if (err < 0)
382 goto del_unicast;
383 }
384
385 hash_add:
386 macvlan_hash_add(vlan);
387 return 0;
388
389 del_unicast:
390 dev_uc_del(lowerdev, dev->dev_addr);
391 out:
392 if (vlan->fwd_priv) {
393 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
394 vlan->fwd_priv);
395 vlan->fwd_priv = NULL;
396 }
397 return err;
398 }
399
400 static int macvlan_stop(struct net_device *dev)
401 {
402 struct macvlan_dev *vlan = netdev_priv(dev);
403 struct net_device *lowerdev = vlan->lowerdev;
404
405 if (vlan->fwd_priv) {
406 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
407 vlan->fwd_priv);
408 vlan->fwd_priv = NULL;
409 return 0;
410 }
411
412 dev_uc_unsync(lowerdev, dev);
413 dev_mc_unsync(lowerdev, dev);
414
415 if (vlan->port->passthru) {
416 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
417 dev_set_promiscuity(lowerdev, -1);
418 goto hash_del;
419 }
420
421 if (dev->flags & IFF_ALLMULTI)
422 dev_set_allmulti(lowerdev, -1);
423
424 dev_uc_del(lowerdev, dev->dev_addr);
425
426 hash_del:
427 macvlan_hash_del(vlan, !dev->dismantle);
428 return 0;
429 }
430
431 static int macvlan_set_mac_address(struct net_device *dev, void *p)
432 {
433 struct macvlan_dev *vlan = netdev_priv(dev);
434 struct net_device *lowerdev = vlan->lowerdev;
435 struct sockaddr *addr = p;
436 int err;
437
438 if (!is_valid_ether_addr(addr->sa_data))
439 return -EADDRNOTAVAIL;
440
441 if (!(dev->flags & IFF_UP)) {
442 /* Just copy in the new address */
443 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
444 } else {
445 /* Rehash and update the device filters */
446 if (macvlan_addr_busy(vlan->port, addr->sa_data))
447 return -EBUSY;
448
449 err = dev_uc_add(lowerdev, addr->sa_data);
450 if (err)
451 return err;
452
453 dev_uc_del(lowerdev, dev->dev_addr);
454
455 macvlan_hash_change_addr(vlan, addr->sa_data);
456 }
457 return 0;
458 }
459
460 static void macvlan_change_rx_flags(struct net_device *dev, int change)
461 {
462 struct macvlan_dev *vlan = netdev_priv(dev);
463 struct net_device *lowerdev = vlan->lowerdev;
464
465 if (change & IFF_ALLMULTI)
466 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
467 }
468
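/*
 * ndo_set_rx_mode: rebuild this macvlan's multicast hash filter and
 * mirror its unicast/multicast address lists down to the lower device.
 * With IFF_PROMISC or IFF_ALLMULTI the filter simply accepts every
 * group.
 */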
469 static void macvlan_set_mac_lists(struct net_device *dev)
470 {
471 struct macvlan_dev *vlan = netdev_priv(dev);
472
473 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
474 bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
475 } else {
476 struct netdev_hw_addr *ha;
477 DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
478
479 bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
480 netdev_for_each_mc_addr(ha, dev) {
481 __set_bit(mc_hash(vlan, ha->addr), filter);
482 }
483
484 __set_bit(mc_hash(vlan, dev->broadcast), filter);
485
486 bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
487 }
488 dev_uc_sync(vlan->lowerdev, dev);
489 dev_mc_sync(vlan->lowerdev, dev);
490 }
491
492 static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
493 {
494 struct macvlan_dev *vlan = netdev_priv(dev);
495
496 if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
497 return -EINVAL;
498 dev->mtu = new_mtu;
499 return 0;
500 }
501
502 /*
503 * macvlan network devices have devices nesting below them and are a special
504 * "super class" of normal network devices; split their locks off into a
505 * separate class since they always nest.
506 */
507 static struct lock_class_key macvlan_netdev_xmit_lock_key;
508 static struct lock_class_key macvlan_netdev_addr_lock_key;
509
510 #define MACVLAN_FEATURES \
511 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
512 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
513 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
514 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
515
516 #define MACVLAN_STATE_MASK \
517 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
518
519 static void macvlan_set_lockdep_class_one(struct net_device *dev,
520 struct netdev_queue *txq,
521 void *_unused)
522 {
523 lockdep_set_class(&txq->_xmit_lock,
524 &macvlan_netdev_xmit_lock_key);
525 }
526
527 static void macvlan_set_lockdep_class(struct net_device *dev)
528 {
529 lockdep_set_class(&dev->addr_list_lock,
530 &macvlan_netdev_addr_lock_key);
531 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
532 }
533
534 static int macvlan_init(struct net_device *dev)
535 {
536 struct macvlan_dev *vlan = netdev_priv(dev);
537 const struct net_device *lowerdev = vlan->lowerdev;
538 int i;
539
540 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
541 (lowerdev->state & MACVLAN_STATE_MASK);
542 dev->features = lowerdev->features & MACVLAN_FEATURES;
543 dev->features |= NETIF_F_LLTX;
544 dev->gso_max_size = lowerdev->gso_max_size;
545 dev->iflink = lowerdev->ifindex;
546 dev->hard_header_len = lowerdev->hard_header_len;
547
548 macvlan_set_lockdep_class(dev);
549
550 vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
551 if (!vlan->pcpu_stats)
552 return -ENOMEM;
553
554 for_each_possible_cpu(i) {
555 struct macvlan_pcpu_stats *mvlstats;
556 mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
557 u64_stats_init(&mvlstats->syncp);
558 }
559
560 return 0;
561 }
562
563 static void macvlan_uninit(struct net_device *dev)
564 {
565 struct macvlan_dev *vlan = netdev_priv(dev);
566 struct macvlan_port *port = vlan->port;
567
568 free_percpu(vlan->pcpu_stats);
569
570 port->count -= 1;
571 if (!port->count)
572 macvlan_port_destroy(port->dev);
573 }
574
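/*
 * Fold the per-cpu counters into @stats.  The 64-bit counters are read
 * under the u64_stats seqcount and retried if an update raced with the
 * read; rx_errors and tx_dropped are plain u32s and are summed without
 * that protection.
 */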
575 static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
576 struct rtnl_link_stats64 *stats)
577 {
578 struct macvlan_dev *vlan = netdev_priv(dev);
579
580 if (vlan->pcpu_stats) {
581 struct macvlan_pcpu_stats *p;
582 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
583 u32 rx_errors = 0, tx_dropped = 0;
584 unsigned int start;
585 int i;
586
587 for_each_possible_cpu(i) {
588 p = per_cpu_ptr(vlan->pcpu_stats, i);
589 do {
590 start = u64_stats_fetch_begin_bh(&p->syncp);
591 rx_packets = p->rx_packets;
592 rx_bytes = p->rx_bytes;
593 rx_multicast = p->rx_multicast;
594 tx_packets = p->tx_packets;
595 tx_bytes = p->tx_bytes;
596 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
597
598 stats->rx_packets += rx_packets;
599 stats->rx_bytes += rx_bytes;
600 stats->multicast += rx_multicast;
601 stats->tx_packets += tx_packets;
602 stats->tx_bytes += tx_bytes;
603 /* rx_errors & tx_dropped are u32, updated
604 * without syncp protection.
605 */
606 rx_errors += p->rx_errors;
607 tx_dropped += p->tx_dropped;
608 }
609 stats->rx_errors = rx_errors;
610 stats->rx_dropped = rx_errors;
611 stats->tx_dropped = tx_dropped;
612 }
613 return stats;
614 }
615
616 static int macvlan_vlan_rx_add_vid(struct net_device *dev,
617 __be16 proto, u16 vid)
618 {
619 struct macvlan_dev *vlan = netdev_priv(dev);
620 struct net_device *lowerdev = vlan->lowerdev;
621
622 return vlan_vid_add(lowerdev, proto, vid);
623 }
624
625 static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
626 __be16 proto, u16 vid)
627 {
628 struct macvlan_dev *vlan = netdev_priv(dev);
629 struct net_device *lowerdev = vlan->lowerdev;
630
631 vlan_vid_del(lowerdev, proto, vid);
632 return 0;
633 }
634
635 static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
636 struct net_device *dev,
637 const unsigned char *addr,
638 u16 flags)
639 {
640 struct macvlan_dev *vlan = netdev_priv(dev);
641 int err = -EINVAL;
642
643 if (!vlan->port->passthru)
644 return -EOPNOTSUPP;
645
646 if (flags & NLM_F_REPLACE)
647 return -EOPNOTSUPP;
648
649 if (is_unicast_ether_addr(addr))
650 err = dev_uc_add_excl(dev, addr);
651 else if (is_multicast_ether_addr(addr))
652 err = dev_mc_add_excl(dev, addr);
653
654 return err;
655 }
656
657 static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
658 struct net_device *dev,
659 const unsigned char *addr)
660 {
661 struct macvlan_dev *vlan = netdev_priv(dev);
662 int err = -EINVAL;
663
664 if (!vlan->port->passthru)
665 return -EOPNOTSUPP;
666
667 if (is_unicast_ether_addr(addr))
668 err = dev_uc_del(dev, addr);
669 else if (is_multicast_ether_addr(addr))
670 err = dev_mc_del(dev, addr);
671
672 return err;
673 }
674
675 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
676 struct ethtool_drvinfo *drvinfo)
677 {
678 strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
679 strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
680 }
681
682 static int macvlan_ethtool_get_settings(struct net_device *dev,
683 struct ethtool_cmd *cmd)
684 {
685 const struct macvlan_dev *vlan = netdev_priv(dev);
686
687 return __ethtool_get_settings(vlan->lowerdev, cmd);
688 }
689
690 static netdev_features_t macvlan_fix_features(struct net_device *dev,
691 netdev_features_t features)
692 {
693 struct macvlan_dev *vlan = netdev_priv(dev);
694 netdev_features_t mask;
695
696 features |= NETIF_F_ALL_FOR_ALL;
697 features &= (vlan->set_features | ~MACVLAN_FEATURES);
698 mask = features;
699
700 features = netdev_increment_features(vlan->lowerdev->features,
701 features,
702 mask);
703 features |= NETIF_F_LLTX;
704
705 return features;
706 }
707
708 static const struct ethtool_ops macvlan_ethtool_ops = {
709 .get_link = ethtool_op_get_link,
710 .get_settings = macvlan_ethtool_get_settings,
711 .get_drvinfo = macvlan_ethtool_get_drvinfo,
712 };
713
714 static const struct net_device_ops macvlan_netdev_ops = {
715 .ndo_init = macvlan_init,
716 .ndo_uninit = macvlan_uninit,
717 .ndo_open = macvlan_open,
718 .ndo_stop = macvlan_stop,
719 .ndo_start_xmit = macvlan_start_xmit,
720 .ndo_change_mtu = macvlan_change_mtu,
721 .ndo_fix_features = macvlan_fix_features,
722 .ndo_change_rx_flags = macvlan_change_rx_flags,
723 .ndo_set_mac_address = macvlan_set_mac_address,
724 .ndo_set_rx_mode = macvlan_set_mac_lists,
725 .ndo_get_stats64 = macvlan_dev_get_stats64,
726 .ndo_validate_addr = eth_validate_addr,
727 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
728 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid,
729 .ndo_fdb_add = macvlan_fdb_add,
730 .ndo_fdb_del = macvlan_fdb_del,
731 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 };
733
734 void macvlan_common_setup(struct net_device *dev)
735 {
736 ether_setup(dev);
737
738 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
739 dev->priv_flags |= IFF_UNICAST_FLT;
740 dev->netdev_ops = &macvlan_netdev_ops;
741 dev->destructor = free_netdev;
742 dev->header_ops = &macvlan_hard_header_ops;
743 dev->ethtool_ops = &macvlan_ethtool_ops;
744 }
745 EXPORT_SYMBOL_GPL(macvlan_common_setup);
746
747 static void macvlan_setup(struct net_device *dev)
748 {
749 macvlan_common_setup(dev);
750 dev->tx_queue_len = 0;
751 }
752
753 static int macvlan_port_create(struct net_device *dev)
754 {
755 struct macvlan_port *port;
756 unsigned int i;
757 int err;
758
759 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
760 return -EINVAL;
761
762 port = kzalloc(sizeof(*port), GFP_KERNEL);
763 if (port == NULL)
764 return -ENOMEM;
765
766 port->passthru = false;
767 port->dev = dev;
768 INIT_LIST_HEAD(&port->vlans);
769 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
770 INIT_HLIST_HEAD(&port->vlan_hash[i]);
771
772 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
773 if (err)
774 kfree(port);
775 else
776 dev->priv_flags |= IFF_MACVLAN_PORT;
777 return err;
778 }
779
780 static void macvlan_port_destroy(struct net_device *dev)
781 {
782 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
783
784 dev->priv_flags &= ~IFF_MACVLAN_PORT;
785 netdev_rx_handler_unregister(dev);
786 kfree_rcu(port, rcu);
787 }
788
789 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
790 {
791 if (tb[IFLA_ADDRESS]) {
792 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
793 return -EINVAL;
794 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
795 return -EADDRNOTAVAIL;
796 }
797
798 if (data && data[IFLA_MACVLAN_FLAGS] &&
799 nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
800 return -EINVAL;
801
802 if (data && data[IFLA_MACVLAN_MODE]) {
803 switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
804 case MACVLAN_MODE_PRIVATE:
805 case MACVLAN_MODE_VEPA:
806 case MACVLAN_MODE_BRIDGE:
807 case MACVLAN_MODE_PASSTHRU:
808 break;
809 default:
810 return -EINVAL;
811 }
812 }
813 return 0;
814 }
815
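/*
 * Common newlink implementation, exported so that callers layered on
 * macvlan (macvtap, for instance) can pass their own receive/forward
 * hooks.  It resolves the lower device (collapsing macvlan-on-macvlan
 * onto the real device), creates the macvlan_port on first use, and
 * registers and links the new device.  A passthru port admits exactly
 * one device, which inherits the lower device's MAC address.
 */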
816 int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
817 struct nlattr *tb[], struct nlattr *data[],
818 int (*receive)(struct sk_buff *skb),
819 int (*forward)(struct net_device *dev,
820 struct sk_buff *skb))
821 {
822 struct macvlan_dev *vlan = netdev_priv(dev);
823 struct macvlan_port *port;
824 struct net_device *lowerdev;
825 int err;
826
827 if (!tb[IFLA_LINK])
828 return -EINVAL;
829
830 lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
831 if (lowerdev == NULL)
832 return -ENODEV;
833
834 /* When creating macvlans on top of other macvlans - use
835 * the real device as the lowerdev.
836 */
837 if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
838 struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
839 lowerdev = lowervlan->lowerdev;
840 }
841
842 if (!tb[IFLA_MTU])
843 dev->mtu = lowerdev->mtu;
844 else if (dev->mtu > lowerdev->mtu)
845 return -EINVAL;
846
847 if (!tb[IFLA_ADDRESS])
848 eth_hw_addr_random(dev);
849
850 if (!macvlan_port_exists(lowerdev)) {
851 err = macvlan_port_create(lowerdev);
852 if (err < 0)
853 return err;
854 }
855 port = macvlan_port_get_rtnl(lowerdev);
856
857 /* Only 1 macvlan device can be created in passthru mode */
858 if (port->passthru)
859 return -EINVAL;
860
861 vlan->lowerdev = lowerdev;
862 vlan->dev = dev;
863 vlan->port = port;
864 vlan->receive = receive;
865 vlan->forward = forward;
866 vlan->set_features = MACVLAN_FEATURES;
867
868 vlan->mode = MACVLAN_MODE_VEPA;
869 if (data && data[IFLA_MACVLAN_MODE])
870 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
871
872 if (data && data[IFLA_MACVLAN_FLAGS])
873 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
874
875 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
876 if (port->count)
877 return -EINVAL;
878 port->passthru = true;
879 eth_hw_addr_inherit(dev, lowerdev);
880 }
881
882 port->count += 1;
883 err = register_netdevice(dev);
884 if (err < 0)
885 goto destroy_port;
886
887 dev->priv_flags |= IFF_MACVLAN;
888 err = netdev_upper_dev_link(lowerdev, dev);
889 if (err)
890 goto destroy_port;
891
892
893 list_add_tail_rcu(&vlan->list, &port->vlans);
894 netif_stacked_transfer_operstate(lowerdev, dev);
895
896 return 0;
897
898 destroy_port:
899 port->count -= 1;
900 if (!port->count)
901 macvlan_port_destroy(lowerdev);
902
903 return err;
904 }
905 EXPORT_SYMBOL_GPL(macvlan_common_newlink);
906
907 static int macvlan_newlink(struct net *src_net, struct net_device *dev,
908 struct nlattr *tb[], struct nlattr *data[])
909 {
910 return macvlan_common_newlink(src_net, dev, tb, data,
911 netif_rx,
912 dev_forward_skb);
913 }
914
915 void macvlan_dellink(struct net_device *dev, struct list_head *head)
916 {
917 struct macvlan_dev *vlan = netdev_priv(dev);
918
919 list_del_rcu(&vlan->list);
920 unregister_netdevice_queue(dev, head);
921 netdev_upper_dev_unlink(vlan->lowerdev, dev);
922 }
923 EXPORT_SYMBOL_GPL(macvlan_dellink);
924
925 static int macvlan_changelink(struct net_device *dev,
926 struct nlattr *tb[], struct nlattr *data[])
927 {
928 struct macvlan_dev *vlan = netdev_priv(dev);
929 enum macvlan_mode mode;
930 bool set_mode = false;
931
932 /* Validate mode, but don't set yet: setting flags may fail. */
933 if (data && data[IFLA_MACVLAN_MODE]) {
934 set_mode = true;
935 mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
936 /* Passthrough mode can't be set or cleared dynamically */
937 if ((mode == MACVLAN_MODE_PASSTHRU) !=
938 (vlan->mode == MACVLAN_MODE_PASSTHRU))
939 return -EINVAL;
940 }
941
942 if (data && data[IFLA_MACVLAN_FLAGS]) {
943 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
944 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
945 if (vlan->port->passthru && promisc) {
946 int err;
947
948 if (flags & MACVLAN_FLAG_NOPROMISC)
949 err = dev_set_promiscuity(vlan->lowerdev, -1);
950 else
951 err = dev_set_promiscuity(vlan->lowerdev, 1);
952 if (err < 0)
953 return err;
954 }
955 vlan->flags = flags;
956 }
957 if (set_mode)
958 vlan->mode = mode;
959 return 0;
960 }
961
962 static size_t macvlan_get_size(const struct net_device *dev)
963 {
964 return (0
965 + nla_total_size(4) /* IFLA_MACVLAN_MODE */
966 + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
967 );
968 }
969
970 static int macvlan_fill_info(struct sk_buff *skb,
971 const struct net_device *dev)
972 {
973 struct macvlan_dev *vlan = netdev_priv(dev);
974
975 if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
976 goto nla_put_failure;
977 if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
978 goto nla_put_failure;
979 return 0;
980
981 nla_put_failure:
982 return -EMSGSIZE;
983 }
984
985 static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
986 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
987 [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
988 };
989
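/*
 * Fill in the rtnl_link_ops fields shared by every macvlan-based link
 * type and register it; the caller supplies kind, setup, newlink and
 * dellink (see macvlan_link_ops below for the plain macvlan flavour).
 */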
990 int macvlan_link_register(struct rtnl_link_ops *ops)
991 {
992 /* common fields */
993 ops->priv_size = sizeof(struct macvlan_dev);
994 ops->validate = macvlan_validate;
995 ops->maxtype = IFLA_MACVLAN_MAX;
996 ops->policy = macvlan_policy;
997 ops->changelink = macvlan_changelink;
998 ops->get_size = macvlan_get_size;
999 ops->fill_info = macvlan_fill_info;
1000
1001 return rtnl_link_register(ops);
1002 }
1003 EXPORT_SYMBOL_GPL(macvlan_link_register);
1004
1005 static struct rtnl_link_ops macvlan_link_ops = {
1006 .kind = "macvlan",
1007 .setup = macvlan_setup,
1008 .newlink = macvlan_newlink,
1009 .dellink = macvlan_dellink,
1010 };
1011
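/*
 * Netdevice notifier for the lower device: propagate carrier and
 * feature changes to the stacked macvlans, destroy them all when the
 * lower device is unregistered, and veto any attempt to change the
 * lower device's type.
 */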
1012 static int macvlan_device_event(struct notifier_block *unused,
1013 unsigned long event, void *ptr)
1014 {
1015 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1016 struct macvlan_dev *vlan, *next;
1017 struct macvlan_port *port;
1018 LIST_HEAD(list_kill);
1019
1020 if (!macvlan_port_exists(dev))
1021 return NOTIFY_DONE;
1022
1023 port = macvlan_port_get_rtnl(dev);
1024
1025 switch (event) {
1026 case NETDEV_CHANGE:
1027 list_for_each_entry(vlan, &port->vlans, list)
1028 netif_stacked_transfer_operstate(vlan->lowerdev,
1029 vlan->dev);
1030 break;
1031 case NETDEV_FEAT_CHANGE:
1032 list_for_each_entry(vlan, &port->vlans, list) {
1033 vlan->dev->gso_max_size = dev->gso_max_size;
1034 netdev_update_features(vlan->dev);
1035 }
1036 break;
1037 case NETDEV_UNREGISTER:
1038 /* twiddle thumbs on netns device moves */
1039 if (dev->reg_state != NETREG_UNREGISTERING)
1040 break;
1041
1042 list_for_each_entry_safe(vlan, next, &port->vlans, list)
1043 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
1044 unregister_netdevice_many(&list_kill);
1045 list_del(&list_kill);
1046 break;
1047 case NETDEV_PRE_TYPE_CHANGE:
1048 /* Forbid the underlying device from changing its type. */
1049 return NOTIFY_BAD;
1050 }
1051 return NOTIFY_DONE;
1052 }
1053
1054 static struct notifier_block macvlan_notifier_block __read_mostly = {
1055 .notifier_call = macvlan_device_event,
1056 };
1057
1058 static int __init macvlan_init_module(void)
1059 {
1060 int err;
1061
1062 register_netdevice_notifier(&macvlan_notifier_block);
1063
1064 err = macvlan_link_register(&macvlan_link_ops);
1065 if (err < 0)
1066 goto err1;
1067 return 0;
1068 err1:
1069 unregister_netdevice_notifier(&macvlan_notifier_block);
1070 return err;
1071 }
1072
1073 static void __exit macvlan_cleanup_module(void)
1074 {
1075 rtnl_link_unregister(&macvlan_link_ops);
1076 unregister_netdevice_notifier(&macvlan_notifier_block);
1077 }
1078
1079 module_init(macvlan_init_module);
1080 module_exit(macvlan_cleanup_module);
1081
1082 MODULE_LICENSE("GPL");
1083 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
1084 MODULE_DESCRIPTION("Driver for MAC address based VLANs");
1085 MODULE_ALIAS_RTNL_LINK("macvlan");