/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
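/*
 * Example (illustrative only, not part of this header): a hypothetical
 * caller that hands a packet to dev_queue_xmit() and folds the mixed
 * return-code namespaces back into 0/-errno. net_xmit_eval() treats the
 * NET_XMIT_CN congestion hint as success.
 */
#if 0
static int example_xmit(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);	/* qdisc code or -errno */

	return net_xmit_eval(rc);	/* NET_XMIT_CN -> 0, others as-is */
}
#endif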
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Initial net device group. All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif /* __KERNEL__ */
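/*
 * Example (illustrative only): a driver-side ndo_get_stats implementation
 * that refreshes and returns the embedded dev->stats, following option 2
 * of the ndo_get_stats documentation further below. The hardware-counter
 * helper is hypothetical.
 */
#if 0
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	dev->stats.rx_dropped += example_read_hw_rx_drops(dev); /* hypothetical */
	return &dev->stats;
}
#endif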
/* Media selection options. */

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/jump_label.h>
extern struct jump_label_key rps_needed;
#endif
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
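/*
 * Example (illustrative only): walking the multicast list with
 * netdev_for_each_mc_addr() to program a hardware filter, as a driver's
 * ndo_set_rx_mode might. example_hw_write_filter() is hypothetical.
 */
#if 0
static void example_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		example_hw_write_filter(ha->addr, dev->addr_len);
}
#endif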
struct hh_cache {
	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
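/*
 * Example (illustrative only): reserving LL_RESERVED_SPACE() headroom when
 * allocating a transmit skb, so a link-layer header can later be prepended
 * without reallocation.
 */
#if 0
static struct sk_buff *example_alloc_tx_skb(struct net_device *dev,
					    unsigned int payload_len)
{
	struct sk_buff *skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* leave headroom */
	return skb;
}
#endif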
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
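/*
 * Example (illustrative only): a pass-through rx_handler and its
 * registration. The handler name is hypothetical; registration requires
 * the rtnl lock.
 */
#if 0
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	/* inspect or redirect *pskb here */
	return RX_HANDLER_PASS;		/* deliver as if no handler */
}

/* err = netdev_rx_handler_register(dev, example_rx_handler, NULL); */
#endif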
extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;
	struct dql		dql;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   ndo_get_stats.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 * SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World Wide
 *	Name (WWN) generation mechanism in the FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 * RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 * Slave management functions (for bridge, bonding, etc). User should
 * call netdev_set_master() to set dev->master properly.
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 */
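/*
 * Example (illustrative only): a minimal net_device_ops instance for a
 * hypothetical Ethernet driver, wiring a few of the hooks documented
 * above to generic helpers and driver-local (hypothetical) functions.
 */
#if 0
static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,		/* hypothetical */
	.ndo_stop		= example_stop,		/* hypothetical */
	.ndo_start_xmit		= example_start_xmit,	/* required hook */
	.ndo_validate_addr	= eth_validate_addr,	/* generic helper */
	.ndo_set_mac_address	= eth_mac_addr,		/* generic helper */
	.ndo_change_mtu		= eth_change_mtu,	/* generic helper */
};
/* before register_netdev(): dev->netdev_ops = &example_netdev_ops; */
#endif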
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);
};
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	struct pm_qos_request	pm_qos_req;

	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */

	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx
						 * This should not be set in
						 * drivers, unless really needed,
						 * because network stack (bonding)
						 * use it if/when necessary, to
						 * avoid dirtying this cache line.
						 */

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

#ifdef CONFIG_RPS
	struct kset		*queues_kset;

	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number. Assigned by driver. This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	/* group the device belongs to */
	int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return 0;
}

#ifndef CONFIG_NET_NS
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return 0;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
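/*
 * Example (illustrative only): the canonical NAPI pattern for a
 * hypothetical driver - the interrupt handler defers work with
 * napi_schedule(), the poll routine drains up to 'budget' packets and
 * calls napi_complete() when done.
 */
#if 0
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;	/* hypothetical priv */

	/* mask device rx interrupts here, then defer to softirq context */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... netif_receive_skb() up to 'budget' packets, count in done ... */
	if (done < budget) {
		napi_complete(napi);
		/* unmask device rx interrupts here */
	}
	return done;
}

/* at init: netif_napi_add(dev, &priv->napi, example_poll, 64); */
#endif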
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_BATCH 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
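/*
 * Example (illustrative only): iterating the per-namespace device list.
 * The RCU variant only requires rcu_read_lock(); plain for_each_netdev()
 * requires the rtnl lock or dev_base_lock instead.
 */
#if 0
static void example_list_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("found %s (ifindex %d)\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}
#endif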
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
						      unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int		netdev_refcnt_read(const struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		init_dummy_netdev(struct net_device *dev);
extern void		netdev_resync_ops(struct net_device *dev);

extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern int		skb_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;

	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
	sd->input_queue_head++;
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
	*qtail = ++sd->input_queue_tail;
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
1887 static inline int netif_xmit_stopped(const struct netdev_queue
*dev_queue
)
1889 return dev_queue
->state
& QUEUE_STATE_ANY_XOFF
;
1892 static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue
*dev_queue
)
1894 return dev_queue
->state
& QUEUE_STATE_ANY_XOFF_OR_FROZEN
;
1897 static inline void netdev_tx_sent_queue(struct netdev_queue
*dev_queue
,
1901 dql_queued(&dev_queue
->dql
, bytes
);
1902 if (unlikely(dql_avail(&dev_queue
->dql
) < 0)) {
1903 set_bit(__QUEUE_STATE_STACK_XOFF
, &dev_queue
->state
);
1904 if (unlikely(dql_avail(&dev_queue
->dql
) >= 0))
1905 clear_bit(__QUEUE_STATE_STACK_XOFF
,
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
                                             unsigned pkts, unsigned bytes)
{
#ifdef CONFIG_BQL
        if (likely(bytes)) {
                dql_completed(&dev_queue->dql, bytes);
                if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
                                      &dev_queue->state) &&
                    dql_avail(&dev_queue->dql) >= 0)) {
                        if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
                                               &dev_queue->state))
                                netif_schedule_queue(dev_queue);
                }
        }
#endif
}
static inline void netdev_completed_queue(struct net_device *dev,
                                          unsigned pkts, unsigned bytes)
{
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
        clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
        dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
        netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
/**
 *      netif_running - test if up
 *      @dev: network device
 *
 *      Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */
/**
 *      netif_start_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        netif_tx_start_queue(txq);
}
/**
 *      netif_stop_subqueue - stop sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        netif_tx_stop_queue(txq);
}
/**
 *      netif_subqueue_stopped - test status of subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                           u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        return netif_tx_queue_stopped(txq);
}
static inline int netif_subqueue_stopped(const struct net_device *dev,
                                         struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 *      netif_wake_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}
/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
                              const struct sk_buff *skb)
{
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}
/**
 *      netif_is_multiqueue - test if device has multiple transmit queues
 *      @dev: network device
 *
 *      Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
        return dev->num_tx_queues > 1;
}
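/*
 * Example (illustrative sketch): a driver with no hardware-specific queue
 * selection policy can implement its queue selection hook as a thin
 * wrapper; at this point in the tree ndo_select_queue() takes (dev, skb).
 *
 *      static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
 *      {
 *              return skb_tx_hash(dev, skb);
 *      }
 */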
extern int netif_set_real_num_tx_queues(struct net_device *dev,
                                        unsigned int txq);
#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
                                        unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                               unsigned int rxq)
{
        return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
                                             const struct net_device *from_dev)
{
        netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
        return netif_set_real_num_rx_queues(to_dev,
                                            from_dev->real_num_rx_queues);
#else
        return 0;
#endif
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
extern int              netif_rx(struct sk_buff *skb);
extern int              netif_rx_ni(struct sk_buff *skb);
extern int              netif_receive_skb(struct sk_buff *skb);
extern gro_result_t     dev_gro_receive(struct napi_struct *napi,
                                        struct sk_buff *skb);
extern gro_result_t     napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t     napi_gro_receive(struct napi_struct *napi,
                                         struct sk_buff *skb);
extern void             napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
extern gro_result_t     napi_frags_finish(struct napi_struct *napi,
                                          struct sk_buff *skb,
                                          gro_result_t ret);
extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
extern gro_result_t     napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
        kfree_skb(napi->skb);
        napi->skb = NULL;
}
extern int netdev_rx_handler_register(struct net_device *dev,
                                      rx_handler_func_t *rx_handler,
                                      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern int              dev_valid_name(const char *name);
extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int              dev_ethtool(struct net *net, struct ifreq *);
extern unsigned         dev_get_flags(const struct net_device *);
extern int              __dev_change_flags(struct net_device *, unsigned int flags);
extern int              dev_change_flags(struct net_device *, unsigned);
extern void             __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int              dev_change_name(struct net_device *, const char *);
extern int              dev_set_alias(struct net_device *, const char *, size_t);
extern int              dev_change_net_namespace(struct net_device *,
                                                 struct net *, const char *);
extern int              dev_set_mtu(struct net_device *, int);
extern void             dev_set_group(struct net_device *, int);
extern int              dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
extern int              dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev,
                                            struct netdev_queue *txq);
extern int              dev_forward_skb(struct net_device *dev,
                                        struct sk_buff *skb);

extern int              netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *      dev_put - release reference to device
 *      @dev: network device
 *
 *      Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *      dev_hold - get reference to device
 *      @dev: network device
 *
 *      Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        this_cpu_inc(*dev->pcpu_refcnt);
}
/* Carrier loss detection, dial on demand.  The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);
/**
 *      netif_carrier_ok - test if carrier present
 *      @dev: network device
 *
 *      Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);
/**
 *      netif_dormant_on - mark device as dormant.
 *      @dev: network device
 *
 *      Mark device as dormant (as per RFC2863).
 *
 *      The dormant state indicates that the relevant interface is not
 *      actually in a condition to pass packets (i.e., it is not 'up') but is
 *      in a "pending" state, waiting for some external event.  For "on-
 *      demand" interfaces, this new state identifies the situation where the
 *      interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}
/**
 *      netif_dormant_off - set device as not dormant.
 *      @dev: network device
 *
 *      Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}
/**
 *      netif_dormant - test if device is dormant
 *      @dev: network device
 *
 *      Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
/**
 *      netif_oper_up - test if device is operational
 *      @dev: network device
 *
 *      Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
/**
 *      netif_device_present - is device available or removed
 *      @dev: network device
 *
 *      Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */

enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};
#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}
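/*
 * Example (illustrative sketch): the conventional driver pattern is a
 * "debug" module parameter defaulting to -1 (use the driver's defaults),
 * fed through netif_msg_init() at probe time.
 *
 *      static int debug = -1;
 *      module_param(debug, int, 0);
 *
 *      priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *                                        NETIF_MSG_PROBE | NETIF_MSG_LINK);
 */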
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
        spin_lock(&txq->_xmit_lock);
        txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
        spin_lock_bh(&txq->_xmit_lock);
        txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
        int ok = spin_trylock(&txq->_xmit_lock);
        if (likely(ok))
                txq->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
        if (txq->xmit_lock_owner != -1)
                txq->trans_start = jiffies;
}
/**
 *      netif_tx_lock - grab network device transmit lock
 *      @dev: network device
 *
 *      Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        spin_lock(&dev->tx_global_lock);
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* We are the only thread of execution doing a
                 * freeze, but we have to grab the _xmit_lock in
                 * order to synchronize with threads which are in
                 * the ->hard_start_xmit() handler and already
                 * checked the frozen bit.
                 */
                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
        local_bh_disable();
        netif_tx_lock(dev);
}
static inline void netif_tx_unlock(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* No need to grab the _xmit_lock here.  If the
                 * queue is not stopped for another reason, we
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        netif_tx_unlock(dev);
        local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {                   \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(txq, cpu);              \
        }                                               \
}

#define HARD_TX_UNLOCK(dev, txq) {                      \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
        }                                               \
}
static inline void netif_tx_disable(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
        local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
        spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
        spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
        spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
        spin_unlock_bh(&dev->addr_list_lock);
}
/*
 * dev_addrs walker.  Should be used only for read access.  Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
        list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
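/*
 * Example (illustrative sketch, hypothetical foo_write_filter()): walking
 * the address list under RCU to program a hardware filter.
 *
 *      struct netdev_hw_addr *ha;
 *
 *      rcu_read_lock();
 *      for_each_dev_addr(dev, ha)
 *              foo_write_filter(priv, ha->addr);
 *      rcu_read_unlock();
 */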
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void             ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                           void (*setup)(struct net_device *),
                                           unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
extern int              register_netdev(struct net_device *dev);
extern void             unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
                                  struct netdev_hw_addr_list *from_list,
                                  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
                                   struct netdev_hw_addr_list *from_list,
                                   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                          struct netdev_hw_addr_list *from_list,
                          int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                             struct netdev_hw_addr_list *from_list,
                             int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
                                 unsigned long event);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                               struct rtnl_link_stats64 *storage);
extern int              netdev_max_backlog;
extern int              netdev_tstamp_prequeue;
extern int              weight_p;
extern int              bpf_jit_enable;
extern int              netdev_set_master(struct net_device *dev, struct net_device *master);
extern int              netdev_set_bond_master(struct net_device *dev,
                                               struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
extern int dev_seq_open_ops(struct inode *inode, struct file *file,
                            const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);
static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
{
        return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline int net_gso_ok(netdev_features_t features, int gso_type)
{
        netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

        /* check flags correspondence */
        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

        return (features & feature) == feature;
}
static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
static inline int netif_needs_gso(struct sk_buff *skb,
                                  netdev_features_t features)
{
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
                                          unsigned int size)
{
        dev->gso_max_size = size;
}
static inline int netif_is_bond_slave(struct net_device *dev)
{
        return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
        if (dev->reg_state != NETREG_REGISTERED)
                return "(unregistered net_device)";
        return dev->name;
}
extern int __netdev_printk(const char *level, const struct net_device *dev,
                           struct va_format *vaf);

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
        MODULE_ALIAS("netdev-" device)
#if defined(DEBUG)
#define netdev_dbg(__dev, format, args...)                      \
        netdev_printk(KERN_DEBUG, __dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)                      \
do {                                                            \
        dynamic_netdev_dbg(__dev, format, ##args);              \
} while (0)
#else
#define netdev_dbg(__dev, format, args...)                      \
({                                                              \
        if (0)                                                  \
                netdev_printk(KERN_DEBUG, __dev, format, ##args); \
        0;                                                      \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg     netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)                       \
({                                                              \
        if (0)                                                  \
                netdev_printk(KERN_DEBUG, dev, format, ##args); \
        0;                                                      \
})
#endif
/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)                       \
        WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)      \
do {                                                            \
        if (netif_msg_##type(priv))                             \
                netdev_printk(level, (dev), fmt, ##args);       \
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)       \
do {                                                            \
        if (netif_msg_##type(priv))                             \
                netdev_##level(dev, fmt, ##args);               \
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)              \
        netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)              \
        netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)               \
        netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)                \
        netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)               \
        netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)             \
        netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)               \
        netif_level(info, priv, type, dev, fmt, ##args)
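/*
 * Example (illustrative): these helpers gate output on the corresponding
 * NETIF_MSG_* bit in priv->msg_enable, so drivers need no open-coded
 * netif_msg_*() checks.
 *
 *      netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 *      netif_err(priv, tx_err, dev, "TX ring %u stalled\n", qnum);
 */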
#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)             \
        netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)          \
do {                                                            \
        if (netif_msg_##type(priv))                             \
                dynamic_netdev_dbg(netdev, format, ##args);     \
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...)             \
({                                                              \
        if (0)                                                  \
                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
        0;                                                      \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg      netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)            \
({                                                              \
        if (0)                                                  \
                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
        0;                                                      \
})
#endif
#endif  /* __KERNEL__ */

#endif  /* _LINUX_NETDEVICE_H */