/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
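
/*
 * Usage sketch (hypothetical, not part of this header): a caller that has
 * just handed an skb to a qdisc can fold the enqueue return code into an
 * errno-style result, treating NET_XMIT_CN as success for accounting.
 * example_report_xmit() is an assumed name:
 *
 *	static int example_report_xmit(int rc)
 *	{
 *		if (net_xmit_eval(rc) == 0)
 *			return 0;
 *		return net_xmit_errno(rc);
 *	}
 *
 * net_xmit_eval() maps NET_XMIT_CN to 0, and net_xmit_errno() returns
 * -ENOBUFS for the remaining (dropped) cases.
 */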
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * a hard_start_xmit() return value below NET_XMIT_MASK means the skb was
 * consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
/*
 * Compute the worst-case header length according to the protocols
 * used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
#include <linux/cache.h>
#include <linux/skbuff.h>

#include <linux/static_key.h>
extern struct static_key rps_needed;
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
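
/*
 * Usage sketch (hypothetical): a driver's ndo_set_rx_mode() handler can walk
 * the multicast list with netdev_for_each_mc_addr() to program a hardware
 * filter; example_hw_add_mc_filter() is an assumed driver-local helper:
 *
 *	static void example_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			example_hw_add_mc_filter(dev, ha->addr);
 *	}
 *
 * The caller holds dev->addr_list_lock around ndo_set_rx_mode(), so the
 * address lists are stable while they are walked.
 */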
struct hh_cache {
	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
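
/*
 * Usage sketch (hypothetical): when allocating an skb that will later be
 * handed to dev_queue_xmit(), reserving LL_RESERVED_SPACE(dev) leaves room
 * for the link-layer header without a later reallocation. payload_len and
 * dev are assumed to be in scope:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *
 * dev->needed_tailroom can be added to the allocation size in the same way
 * when the device needs trailing space.
 */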
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

typedef enum gro_result gro_result_t;
/**
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register an rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister an rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev but wants the skb to be delivered
 * normally, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
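
/*
 * Sketch of a pass-through rx_handler (hypothetical; it would be registered
 * elsewhere via netdev_rx_handler_register(dev, example_handle_frame, NULL)
 * under RTNL). example_frame_is_mine() and example_deliver() are assumed
 * helpers, not real APIs:
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (example_frame_is_mine(skb)) {
 *			example_deliver(skb);
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		return RX_HANDLER_PASS;
 *	}
 *
 * A handler that retargets the skb would set skb->dev to the new device and
 * return RX_HANDLER_ANOTHER instead.
 */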
void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
void __napi_complete(struct napi_struct *n);
void napi_complete(struct napi_struct *n);

/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
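
/*
 * Usage sketch (hypothetical): the classic interrupt handler pattern is to
 * mask the device's interrupts and hand the remaining work to NAPI;
 * example_mask_irqs() and the example_priv layout are assumed driver code:
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			example_mask_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * The poll routine later re-enables interrupts after calling napi_complete().
 */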
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | \
					 QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits is set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */
struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc_sleeping;
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
			struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model_description[256];
};
#endif
#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device address list filtering changes.
 *	If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return
 *	a not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	netdev->features directly.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_port_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and 'priv' is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_port_id *ppid);
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
};
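
/*
 * Sketch of a minimal net_device_ops instance (hypothetical driver; only the
 * mandatory ndo_start_xmit plus the usual open/stop hooks are filled in, and
 * all example_* functions are assumed driver code):
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open		= example_open,
 *		.ndo_stop		= example_stop,
 *		.ndo_start_xmit		= example_start_xmit,
 *		.ndo_set_rx_mode	= example_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * The structure is assigned to dev->netdev_ops before register_netdev().
 * eth_mac_addr() and eth_validate_addr() are generic helpers from
 * <linux/etherdevice.h> commonly used for Ethernet devices.
 */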
/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace; this means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
 * @IFF_MASTER_8023AD: bonding master, 802.3ad
 * @IFF_MASTER_ALB: bonding master, balance-alb
 * @IFF_BONDING: bonding master or slave
 * @IFF_SLAVE_NEEDARP: need ARPs for validation
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_SLAVE_INACTIVE		= 1<<2,
	IFF_MASTER_8023AD		= 1<<3,
	IFF_MASTER_ALB			= 1<<4,
	IFF_BONDING			= 1<<5,
	IFF_SLAVE_NEEDARP		= 1<<6,
	IFF_ISATAP			= 1<<7,
	IFF_MASTER_ARPMON		= 1<<8,
	IFF_WAN_HDLC			= 1<<9,
	IFF_XMIT_DST_RELEASE		= 1<<10,
	IFF_DONT_BRIDGE			= 1<<11,
	IFF_DISABLE_NETPOLL		= 1<<12,
	IFF_MACVLAN_PORT		= 1<<13,
	IFF_BRIDGE_PORT			= 1<<14,
	IFF_OVS_DATAPATH		= 1<<15,
	IFF_TX_SKB_SHARING		= 1<<16,
	IFF_UNICAST_FLT			= 1<<17,
	IFF_TEAM_PORT			= 1<<18,
	IFF_SUPP_NOFCS			= 1<<19,
	IFF_LIVE_ADDR_CHANGE		= 1<<20,
	IFF_MACVLAN			= 1<<21,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
#define IFF_MASTER_8023AD		IFF_MASTER_8023AD
#define IFF_MASTER_ALB			IFF_MASTER_ALB
#define IFF_BONDING			IFF_BONDING
#define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
#define IFF_ISATAP			IFF_ISATAP
#define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	int			irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;

	/* directly linked devices, like slaves for bonding */
	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	/* all linked devices, *including* neighbours */
	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;
	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features inherited by encapsulating devices
	 * This field indicates what encapsulation offloads
	 * the hardware is capable of doing, and drivers will
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;
	/* mask of features inheritable by MPLS */
	netdev_features_t	mpls_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;

	struct net_device_stats	stats;

	/* dropped packets by core network, do not use this in drivers */
	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;

	/* Stats to monitor carrier on<->off transitions */
	atomic_t		carrier_changes;
#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	const struct forwarding_accel_ops *fwd_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;
	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;
	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;		/* Used to differentiate devices
						 * that share the same link
						 * layer address
						 */
	unsigned short		dev_port;	/* Used to differentiate
						 * devices that share the same
						 * link layer address
						 */
	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						    * hw addresses
						    */

	struct kset		*queues_kset;

	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;
	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_sw_netstats __percpu *tstats;
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
		struct pcpu_vstats __percpu	*vstats; /* veth stats */
	};
	struct garp_port __rcu	*garp_port;
	struct mrp_port __rcu	*mrp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];
	/* space for optional per-rx queue attributes */
	const struct attribute_group *sysfs_rx_queue_group;

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;

	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	struct lock_class_key *qdisc_tx_busylock;

	/* group the device belongs to */
	int group;

	struct pm_qos_request	pm_qos_req;
};

#define	to_net_dev(d) container_of(d, struct net_device, dev)
#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					  unsigned int index)
{
	return &dev->_tx[index];
}
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return false;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
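
/*
 * Usage sketch (hypothetical): private state is co-allocated with the
 * net_device and reached through netdev_priv(); struct example_priv is an
 * assumed driver-local type and priv/dev are assumed to be in scope:
 *
 *	struct example_priv {
 *		struct napi_struct napi;
 *		void __iomem *regs;
 *	};
 *
 *	dev = alloc_etherdev(sizeof(struct example_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *
 * The pointer stays valid for the lifetime of the net_device; it must not be
 * freed separately.
 */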
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised to not use bigger value
 */
#define NAPI_POLL_WEIGHT 64
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);
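
/*
 * Usage sketch (hypothetical): a driver registers its poll routine once at
 * setup time and completes NAPI when a poll round does less than its budget;
 * example_clean_rx() and example_enable_irqs() are assumed driver code:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv = container_of(napi,
 *							 struct example_priv,
 *							 napi);
 *		int done = example_clean_rx(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			example_enable_irqs(priv);
 *		}
 *		return done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
 */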
/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	u16	proto;

	/* Used in udp_gro_receive */
	u8	udp_mark:1;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};
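
/*
 * Usage sketch (hypothetical): a protocol registers a receive hook for its
 * ethertype with dev_add_pack(); example_rcv() and the 0x88b5 ethertype are
 * assumed for illustration only:
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_packet_type __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_packet_type);
 */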
struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct udp_offload {
	__be16			 port;
	struct offload_callbacks callbacks;
};
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	struct u64_stats_sync	syncp;
};

#define netdev_alloc_pcpu_stats(type)				\
({								\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
	if (pcpu_stats)	{					\
		int i;						\
		for_each_possible_cpu(i) {			\
			typeof(type) *stat;			\
			stat = per_cpu_ptr(pcpu_stats, i);	\
			u64_stats_init(&stat->syncp);		\
		}						\
	}							\
	pcpu_stats;						\
})
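
/*
 * Usage sketch (hypothetical): a virtual device allocates its per-cpu
 * counters at ndo_init() time and bumps them from the transmit hot path:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 * Later, in the transmit path:
 *
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	tstats->tx_packets++;
 *	tstats->tx_bytes += skb->len;
 *	u64_stats_update_end(&tstats->syncp);
 *
 * free_percpu(dev->tstats) is the matching teardown in ndo_uninit().
 */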
#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
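/* Example (illustrative sketch): a subsystem interested in device events
 * registers a notifier_block and decodes the event in its callback.  The
 * names "my_netdev_event" and "my_netdev_nb" are assumptions.
 */
#if 0
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};
/* register_netdevice_notifier(&my_netdev_nb) at init time. */
#endif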
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
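/* Example (illustrative sketch): walking the per-namespace device list.
 * Callers either hold RTNL and use for_each_netdev(), or hold
 * rcu_read_lock() and use the _rcu variant, as below.
 */
#if 0
static void my_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}
#endif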
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

static inline int dev_rebuild_header(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->rebuild)
		return 0;
	return dev->header_ops->rebuild(skb);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;

	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
	sd->input_queue_head++;
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
	*qtail = ++sd->input_queue_tail;
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
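/* Example (illustrative sketch): the usual flow-control pattern in a
 * driver's ndo_start_xmit() is to stop the queue when the hardware ring is
 * full and wake it from TX completion once descriptors are reclaimed.
 * "my_ring_full" and "my_hw_queue_skb" are assumed helper names.
 */
#if 0
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_ring_full(dev)) {
		netif_stop_queue(dev);		/* back-pressure the stack */
		return NETDEV_TX_BUSY;
	}
	my_hw_queue_skb(dev, skb);
	if (my_ring_full(dev))
		netif_stop_queue(dev);
	return NETDEV_TX_OK;
}

/* In the TX completion handler, after reclaiming descriptors:
 *	if (netif_queue_stopped(dev) && !my_ring_full(dev))
 *		netif_wake_queue(dev);
 */
#endif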
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
}

/**
 * 	netdev_sent_queue - report the number of bytes queued to hardware
 * 	@dev: network device
 * 	@bytes: number of bytes queued to the hardware device queue
 *
 * 	Report the number of bytes queued for sending/completion to the network
 * 	device hardware queue. @bytes should be a good approximation and should
 * 	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
}

/**
 * 	netdev_completed_queue - report bytes and packets completed by device
 * 	@dev: network device
 * 	@pkts: actual number of packets sent over the medium
 * 	@bytes: actual number of bytes sent over the medium
 *
 * 	Report the number of bytes and packets transmitted by the network device
 * 	hardware queue over the physical medium, @bytes must exactly match the
 * 	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
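/* Example (illustrative sketch): a BQL-aware driver pairs
 * netdev_sent_queue() in its xmit path with netdev_completed_queue() in its
 * TX completion path, and calls netdev_reset_queue() (declared just below)
 * when the ring is torn down.  The surrounding variable names are assumed.
 */
#if 0
/* xmit path, after the descriptor is handed to hardware: */
netdev_sent_queue(dev, skb->len);

/* TX completion path, after reclaiming descriptors: */
netdev_completed_queue(dev, completed_pkts, completed_bytes);

/* device down / ring reset: */
netdev_reset_queue(dev);
#endif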
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
}

/**
 * 	netdev_reset_queue - reset the packets and bytes count of a network device
 * 	@dev_queue: network device
 *
 * 	Reset the bytes and packet count of a network device and clear the
 * 	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * 	@dev: network device
 * 	@queue_index: given tx queue index
 *
 * 	Returns 0 if given tx queue index >= number of device tx queues,
 * 	otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
						unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
}

static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
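/* Example (illustrative sketch): a TX completion handler running in hard-irq
 * context would use dev_consume_skb_irq() for packets that were sent and
 * dev_kfree_skb_irq() for ones it drops; code that may run in any context
 * uses the _any variants.  "my_tx_complete" is an assumed name.
 */
#if 0
static void my_tx_complete(struct net_device *dev, struct sk_buff *skb,
			   bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_irq(skb);	/* normal completion, not a drop */
	else
		dev_kfree_skb_irq(skb);		/* accounted as a packet drop */
}
#endif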
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);

extern int		netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
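/* Example (illustrative sketch): code that keeps a struct net_device pointer
 * beyond an RCU read-side section takes a reference with dev_hold() and
 * drops it with dev_put() when done.  The lookup below is only one possible
 * way to obtain the pointer.
 */
#if 0
struct net_device *dev;

rcu_read_lock();
dev = dev_get_by_index_rcu(&init_net, ifindex);
if (dev)
	dev_hold(dev);		/* keep it valid after rcu_read_unlock() */
rcu_read_unlock();

if (dev) {
	/* ... use dev ... */
	dev_put(dev);
}
#endif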
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);
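/* Example (illustrative sketch): a driver reflects PHY link state to the
 * stack from its link-change interrupt or polling routine.  "my_link_change"
 * is an assumed name.
 */
#if 0
static void my_link_change(struct net_device *dev, bool link_up)
{
	if (link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
#endif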
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is in the dormant state.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
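/* Example (illustrative sketch): a driver with a module parameter "debug"
 * seeds priv->msg_enable in probe and gates its messages with the
 * netif_msg_*() helpers.  The parameter and field names are assumptions.
 */
#if 0
static int debug = -1;		/* -1 means: use the default bits below */

/* in probe: */
priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);

/* later: */
if (netif_msg_link(priv))
	netdev_info(dev, "link is up\n");
#endif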
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		true )

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
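/* Example (illustrative sketch): the usual life cycle of an Ethernet-like
 * virtual device.  "struct my_priv", the "mydev%d" name template and
 * "my_create" are assumptions for illustration.
 */
#if 0
struct my_priv { int dummy; };

static int my_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}
/* teardown: unregister_netdev(dev); free_netdev(dev); */
#endif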
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
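/* Example (illustrative sketch): a driver's ndo_set_rx_mode() can push the
 * unicast and multicast lists down to hardware with __dev_uc_sync() and
 * __dev_mc_sync() (the latter is declared below, see the multicast helpers).
 * The "my_hw_addr_add"/"my_hw_addr_del" callbacks are assumed names.
 */
#if 0
static int my_hw_addr_add(struct net_device *dev, const unsigned char *addr)
{
	/* program one filter entry into the NIC */
	return 0;
}

static int my_hw_addr_del(struct net_device *dev, const unsigned char *addr)
{
	/* remove one filter entry from the NIC */
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, my_hw_addr_add, my_hw_addr_del);
	__dev_mc_sync(dev, my_hw_addr_add, my_hw_addr_del);
}
#endif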
/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						      struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->adj_list.upper, \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->all_adj_list.upper, \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))

void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
			    struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     ldev = netdev_lower_get_next(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_lower_get_next(dev, &(iter)))
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							   netdev_features_t f2)
{
	if (f1 & NETIF_F_GEN_CSUM)
		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	if (f2 & NETIF_F_GEN_CSUM)
		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	f1 &= f2;
	if (f1 & NETIF_F_GEN_CSUM)
		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return f1;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}
static inline bool netif_is_macvlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
int netdev_emerg(const struct net_device *dev, const char *format, ...);
int netdev_alert(const struct net_device *dev, const char *format, ...);
int netdev_crit(const struct net_device *dev, const char *format, ...);
int netdev_err(const struct net_device *dev, const char *format, ...);
int netdev_warn(const struct net_device *dev, const char *format, ...);
int netdev_notice(const struct net_device *dev, const char *format, ...);
int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

#endif	/* _LINUX_NETDEVICE_H */