net: gro: fix a potential crash in skb_gro_reset_offset
include/linux/netdevice.h
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

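/*
 * Illustrative sketch (not part of this header): a caller invoking a
 * driver's transmit hook directly can use dev_xmit_complete() to decide
 * whether it still owns the skb. The ops, dev and skb variables here are
 * hypothetical.
 *
 *	netdev_tx_t rc = ops->ndo_start_xmit(skb, dev);
 *
 *	if (dev_xmit_complete(rc))
 *		return rc;	// skb consumed; must not be touched again
 *
 *	// NETDEV_TX_BUSY or NETDEV_TX_LOCKED: the skb is still ours and
 *	// may be requeued and retried later.
 */
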
#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Initial net device group. All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

#ifdef __KERNEL__
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

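/*
 * Illustrative sketch (not from this header): a driver following option 3
 * of the ndo_get_stats rules documented further below just updates
 * dev->stats from its hot paths, one word-sized field at a time. skb and
 * crc_bad are hypothetical.
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 *	if (crc_bad)
 *		dev->stats.rx_crc_errors++;
 */
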
#endif	/* __KERNEL__ */


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

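/*
 * Illustrative sketch (not from this header): programming a hardware
 * multicast filter from an ndo_set_rx_mode() implementation.
 * my_write_mc_filter() and priv are hypothetical.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		my_write_mc_filter(priv, ha->addr);
 */
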
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

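/*
 * Illustrative sketch (not from this header): protocol code building an
 * output skb reserves link-layer headroom with LL_RESERVED_SPACE() so the
 * driver can push its hardware header without reallocating. payload_len
 * is hypothetical.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */
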
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

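/*
 * Illustrative sketch (not from this header): a minimal rx_handler that
 * counts frames and lets delivery proceed unchanged. my_rx_handler and
 * the counter are hypothetical; the handler would be attached with
 * netdev_rx_handler_register(dev, my_rx_handler, &my_counter).
 *
 *	static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		atomic_long_t *ctr = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		atomic_long_inc(ctr);
 *		return RX_HANDLER_PASS;
 *	}
 */
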
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

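/*
 * Illustrative sketch (not from this header): the canonical driver pattern
 * around these helpers. The interrupt handler masks device interrupts and
 * calls napi_schedule(); the poll callback processes up to `budget`
 * packets and re-enables interrupts when done. struct my_priv and the
 * my_* helpers are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && my_rx_one(napi->dev))
 *			work++;
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_enable_irqs(netdev_priv(napi->dev));
 *		}
 *		return work;
 *	}
 */
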
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

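/*
 * Illustrative sketch (not from this header): the mask is a power-of-two
 * entry count minus one, so `hash & table->mask` always lands inside
 * ents[]. In practice the core allocates this table when the
 * rps_sock_flow_entries sysctl is written; entries and flow_hash below
 * are hypothetical, and entries must be a power of two.
 *
 *	struct rps_sock_flow_table *table;
 *	unsigned int i;
 *
 *	table = kzalloc(RPS_SOCK_FLOW_TABLE_SIZE(entries), GFP_KERNEL);
 *	if (table) {
 *		table->mask = entries - 1;
 *		for (i = 0; i < entries; i++)
 *			table->ents[i] = RPS_NO_CPU;
 *		rps_record_sock_flow(table, flow_hash);
 *	}
 */
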
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
    / sizeof(u16))

/*
 * This structure holds all XPS maps for the device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device
 * configured to run the FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports
 *	multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device address list filtering
 *	changes. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features &
 *	NETIF_F_HW_VLAN_FILTER), this function is called when a VLAN id
 *	is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features &
 *	NETIF_F_HW_VLAN_FILTER), this function is called when a VLAN id
 *	is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 *
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fiber Channel management service as per
 *	the FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc). User should
 *	call netdev_set_master() to set dev->master properly.
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);
};

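/*
 * Illustrative sketch (not from this header): a minimal ops table for a
 * hypothetical Ethernet driver. Only ndo_start_xmit is mandatory; the
 * generic eth_mac_addr()/eth_validate_addr() helpers are commonly reused.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 *	dev->netdev_ops = &my_netdev_ops;	// before register_netdev()
 */
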
/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;

	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx
						 * This should not be set in
						 * drivers, unless really needed,
						 * because network stack (bonding)
						 * use it if/when necessary, to
						 * avoid dirtying this cache line.
						 */

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number. Assigned by driver. This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	struct lock_class_key *qdisc_tx_busylock;

	/* group the device belongs to */
	int group;

	struct pm_qos_request	pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

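/*
 * Illustrative sketch (not from this header): an ndo_setup_tc()
 * implementation mapping each traffic class onto a contiguous block of
 * queues. queues_per_tc is hypothetical.
 *
 *	static int my_setup_tc(struct net_device *dev, u8 num_tc)
 *	{
 *		u8 tc;
 *
 *		if (netdev_set_num_tc(dev, num_tc))
 *			return -EINVAL;
 *
 *		for (tc = 0; tc < num_tc; tc++)
 *			netdev_set_tc_queue(dev, tc, queues_per_tc,
 *					    tc * queues_per_tc);
 *		return 0;
 *	}
 */
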
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

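/*
 * Illustrative sketch (not from this header): applying a per-queue action
 * to every TX queue with the iterator above, here clearing the timeout
 * counter during a hypothetical driver reset.
 *
 *	static void my_clear_timeouts(struct net_device *dev,
 *				      struct netdev_queue *txq, void *unused)
 *	{
 *		txq->trans_timeout = 0;
 *	}
 *
 *	netdev_for_each_tx_queue(dev, my_clear_timeouts, NULL);
 */
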
extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
					   struct sk_buff *skb);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

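/*
 * Illustrative sketch (not from this header): the allocation pattern that
 * makes netdev_priv() work; alloc_netdev() places the private area right
 * after the aligned net_device. struct my_priv and my_setup are
 * hypothetical.
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */
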
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

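/*
 * Illustrative sketch (not from this header): inside a protocol's
 * gro_receive callback, this control block is how the callback talks to
 * the GRO core. my_headers_unsupported() is a hypothetical check.
 *
 *	if (my_headers_unsupported(skb))
 *		NAPI_GRO_CB(skb)->flush = 1;	// deliver as-is, don't merge
 */
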
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

1da177e4
LT
1536#include <linux/notifier.h>
1537
dcfe1421
AW
1538/* netdevice notifier chain. Please remember to update the rtnetlink
1539 * notification exclusion list in rtnetlink_event() when adding new
1540 * types.
1541 */
1542#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1543#define NETDEV_DOWN 0x0002
1544#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1545 detected a hardware crash and restarted
1546 - we can use this e.g. to kick tcp sessions
1547 once done */
1548#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1549#define NETDEV_REGISTER 0x0005
1550#define NETDEV_UNREGISTER 0x0006
1551#define NETDEV_CHANGEMTU 0x0007
1552#define NETDEV_CHANGEADDR 0x0008
1553#define NETDEV_GOING_DOWN 0x0009
1554#define NETDEV_CHANGENAME 0x000A
1555#define NETDEV_FEAT_CHANGE 0x000B
1556#define NETDEV_BONDING_FAILOVER 0x000C
1557#define NETDEV_PRE_UP 0x000D
1558#define NETDEV_PRE_TYPE_CHANGE 0x000E
1559#define NETDEV_POST_TYPE_CHANGE 0x000F
1560#define NETDEV_POST_INIT 0x0010
0115e8e3 1561#define NETDEV_UNREGISTER_FINAL 0x0011
dcfe1421
AW
1562#define NETDEV_RELEASE 0x0012
1563#define NETDEV_NOTIFY_PEERS 0x0013
1564#define NETDEV_JOIN 0x0014
1565
1566extern int register_netdevice_notifier(struct notifier_block *nb);
1567extern int unregister_netdevice_notifier(struct notifier_block *nb);
1568extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1569
1570
1da177e4
LT
1571extern rwlock_t dev_base_lock; /* Device list lock */
1572
7562f876 1573
881d966b
EB
1574#define for_each_netdev(net, d) \
1575 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
dcbccbd4
EB
1576#define for_each_netdev_reverse(net, d) \
1577 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
c6d14c84
ED
1578#define for_each_netdev_rcu(net, d) \
1579 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
881d966b
EB
1580#define for_each_netdev_safe(net, d, n) \
1581 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1582#define for_each_netdev_continue(net, d) \
1583 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
254245d2 1584#define for_each_netdev_continue_rcu(net, d) \
1585 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
881d966b 1586#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
7562f876 1587
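/*
 * Example (illustrative only): walking the device list of a namespace
 * under RCU protection; "net" is a struct net pointer, e.g. &init_net.
 *
 *	struct net_device *d;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, d)
 *		pr_info("%s: ifindex %d\n", d->name, d->ifindex);
 *	rcu_read_unlock();
 */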
a050c33f
DL
1588static inline struct net_device *next_net_device(struct net_device *dev)
1589{
1590 struct list_head *lh;
1591 struct net *net;
1592
c346dca1 1593 net = dev_net(dev);
a050c33f
DL
1594 lh = dev->dev_list.next;
1595 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1596}
1597
ce81b76a
ED
1598static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1599{
1600 struct list_head *lh;
1601 struct net *net;
1602
1603 net = dev_net(dev);
ccf43438 1604 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
ce81b76a
ED
1605 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1606}
1607
a050c33f
DL
1608static inline struct net_device *first_net_device(struct net *net)
1609{
1610 return list_empty(&net->dev_base_head) ? NULL :
1611 net_device_entry(net->dev_base_head.next);
1612}
7562f876 1613
ccf43438
ED
1614static inline struct net_device *first_net_device_rcu(struct net *net)
1615{
1616 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1617
1618 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1619}
1620
1da177e4
LT
1621extern int netdev_boot_setup_check(struct net_device *dev);
1622extern unsigned long netdev_boot_base(const char *prefix, int unit);
941666c2
ED
1623extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1624 const char *hwaddr);
881d966b
EB
1625extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1626extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1da177e4
LT
1627extern void dev_add_pack(struct packet_type *pt);
1628extern void dev_remove_pack(struct packet_type *pt);
1629extern void __dev_remove_pack(struct packet_type *pt);
1630
bb69ae04
ED
1631extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1632 unsigned short mask);
881d966b 1633extern struct net_device *dev_get_by_name(struct net *net, const char *name);
72c9528b 1634extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
881d966b 1635extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1da177e4
LT
1636extern int dev_alloc_name(struct net_device *dev, const char *name);
1637extern int dev_open(struct net_device *dev);
1638extern int dev_close(struct net_device *dev);
0187bdfb 1639extern void dev_disable_lro(struct net_device *dev);
95603e22 1640extern int dev_loopback_xmit(struct sk_buff *newskb);
1da177e4
LT
1641extern int dev_queue_xmit(struct sk_buff *skb);
1642extern int register_netdevice(struct net_device *dev);
44a0873d
ED
1643extern void unregister_netdevice_queue(struct net_device *dev,
1644 struct list_head *head);
9b5e383c 1645extern void unregister_netdevice_many(struct list_head *head);
44a0873d
ED
1646static inline void unregister_netdevice(struct net_device *dev)
1647{
1648 unregister_netdevice_queue(dev, NULL);
1649}
1650
29b4433d 1651extern int netdev_refcnt_read(const struct net_device *dev);
1da177e4
LT
1652extern void free_netdev(struct net_device *dev);
1653extern void synchronize_net(void);
937f1ba5 1654extern int init_dummy_netdev(struct net_device *dev);
9d40bbda 1655extern void netdev_resync_ops(struct net_device *dev);
937f1ba5 1656
881d966b
EB
1657extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1658extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
fb699dfd 1659extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1da177e4
LT
1660extern int dev_restart(struct net_device *dev);
1661#ifdef CONFIG_NETPOLL_TRAP
1662extern int netpoll_trap(void);
1663#endif
86911732
HX
1664extern int skb_gro_receive(struct sk_buff **head,
1665 struct sk_buff *skb);
1666
1667static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1668{
1669 return NAPI_GRO_CB(skb)->data_offset;
1670}
1671
1672static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1673{
1674 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1675}
1676
1677static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1678{
1679 NAPI_GRO_CB(skb)->data_offset += len;
1680}
1681
a5b1cf28
HX
1682static inline void *skb_gro_header_fast(struct sk_buff *skb,
1683 unsigned int offset)
86911732 1684{
a5b1cf28
HX
1685 return NAPI_GRO_CB(skb)->frag0 + offset;
1686}
78a478d0 1687
a5b1cf28
HX
1688static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1689{
1690 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1691}
78a478d0 1692
a5b1cf28
HX
1693static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1694 unsigned int offset)
1695{
17dd759c
HX
1696 if (!pskb_may_pull(skb, hlen))
1697 return NULL;
1698
a5b1cf28
HX
1699 NAPI_GRO_CB(skb)->frag0 = NULL;
1700 NAPI_GRO_CB(skb)->frag0_len = 0;
17dd759c 1701 return skb->data + offset;
86911732 1702}
1da177e4 1703
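/*
 * Example (sketch of the usual pattern in a gro_receive handler, with a
 * hypothetical "struct foohdr"): try the frag0 fast path first and fall
 * back to the pskb_may_pull()-based slow path only when frag0 is too
 * short to cover the header.
 *
 *	struct foohdr *fh;
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(*fh);
 *
 *	fh = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		fh = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!fh))
 *			goto out;
 *	}
 */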
aa4b9f53
HX
1704static inline void *skb_gro_mac_header(struct sk_buff *skb)
1705{
78d3fd0b 1706 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
aa4b9f53
HX
1707}
1708
36e7b1b8
HX
1709static inline void *skb_gro_network_header(struct sk_buff *skb)
1710{
78d3fd0b
HX
1711 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1712 skb_network_offset(skb);
36e7b1b8
HX
1713}
1714
0c4e8581
SH
1715static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1716 unsigned short type,
3b04ddde 1717 const void *daddr, const void *saddr,
95c96174 1718 unsigned int len)
0c4e8581 1719{
f1ecfd5d 1720 if (!dev->header_ops || !dev->header_ops->create)
0c4e8581 1721 return 0;
3b04ddde
SH
1722
1723 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
0c4e8581
SH
1724}
1725
b95cce35
SH
1726static inline int dev_parse_header(const struct sk_buff *skb,
1727 unsigned char *haddr)
1728{
1729 const struct net_device *dev = skb->dev;
1730
1b83336b 1731 if (!dev->header_ops || !dev->header_ops->parse)
b95cce35 1732 return 0;
3b04ddde 1733 return dev->header_ops->parse(skb, haddr);
b95cce35
SH
1734}
1735
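/*
 * Example (illustrative): prepending a link-layer header through the
 * device's header_ops; "dest_mac" is a hypothetical destination address,
 * and passing NULL for saddr lets the device fill in its own address.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0)
 *		goto drop;
 */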
1da177e4
LT
1736typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1737extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1738static inline int unregister_gifconf(unsigned int family)
1739{
1740 return register_gifconf(family, NULL);
1741}
1742
1743/*
88751275 1744 * Incoming packets are placed on per-cpu queues
1da177e4 1745 */
d94d9fee 1746struct softnet_data {
37437bb2 1747 struct Qdisc *output_queue;
a9cbd588 1748 struct Qdisc **output_queue_tailp;
1da177e4 1749 struct list_head poll_list;
1da177e4 1750 struct sk_buff *completion_queue;
6e7676c1 1751 struct sk_buff_head process_queue;
1da177e4 1752
dee42870 1753 /* stats */
cd7b5396
DM
1754 unsigned int processed;
1755 unsigned int time_squeeze;
1756 unsigned int cpu_collision;
1757 unsigned int received_rps;
dee42870 1758
fd793d89 1759#ifdef CONFIG_RPS
88751275
ED
1760 struct softnet_data *rps_ipi_list;
1761
1762 /* Elements below can be accessed between CPUs for RPS */
0a9627f2 1763 struct call_single_data csd ____cacheline_aligned_in_smp;
88751275
ED
1764 struct softnet_data *rps_ipi_next;
1765 unsigned int cpu;
fec5e652 1766 unsigned int input_queue_head;
76cc8b13 1767 unsigned int input_queue_tail;
1e94d72f 1768#endif
95c96174 1769 unsigned int dropped;
0a9627f2 1770 struct sk_buff_head input_pkt_queue;
bea3348e 1771 struct napi_struct backlog;
1da177e4
LT
1772};
1773
76cc8b13 1774static inline void input_queue_head_incr(struct softnet_data *sd)
fec5e652
TH
1775{
1776#ifdef CONFIG_RPS
76cc8b13
TH
1777 sd->input_queue_head++;
1778#endif
1779}
1780
1781static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1782 unsigned int *qtail)
1783{
1784#ifdef CONFIG_RPS
1785 *qtail = ++sd->input_queue_tail;
fec5e652
TH
1786#endif
1787}
1788
0a9627f2 1789DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1da177e4 1790
37437bb2 1791extern void __netif_schedule(struct Qdisc *q);
1da177e4 1792
86d804e1 1793static inline void netif_schedule_queue(struct netdev_queue *txq)
1da177e4 1794{
73466498 1795 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
37437bb2 1796 __netif_schedule(txq->qdisc);
86d804e1
DM
1797}
1798
fd2ea0a7
DM
1799static inline void netif_tx_schedule_all(struct net_device *dev)
1800{
1801 unsigned int i;
1802
1803 for (i = 0; i < dev->num_tx_queues; i++)
1804 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1805}
1806
d29f749e
DJ
1807static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1808{
73466498 1809 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1810}
1811
bea3348e
SH
1812/**
1813 * netif_start_queue - allow transmit
1814 * @dev: network device
1815 *
1816 * Allow upper layers to call the device hard_start_xmit routine.
1817 */
1da177e4
LT
1818static inline void netif_start_queue(struct net_device *dev)
1819{
e8a0464c 1820 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1821}
1822
fd2ea0a7
DM
1823static inline void netif_tx_start_all_queues(struct net_device *dev)
1824{
1825 unsigned int i;
1826
1827 for (i = 0; i < dev->num_tx_queues; i++) {
1828 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1829 netif_tx_start_queue(txq);
1830 }
1831}
1832
79d16385 1833static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1da177e4
LT
1834{
1835#ifdef CONFIG_NETPOLL_TRAP
5f286e11 1836 if (netpoll_trap()) {
7b3d3e4f 1837 netif_tx_start_queue(dev_queue);
1da177e4 1838 return;
5f286e11 1839 }
1da177e4 1840#endif
73466498 1841 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
37437bb2 1842 __netif_schedule(dev_queue->qdisc);
79d16385
DM
1843}
1844
d29f749e
DJ
1845/**
1846 * netif_wake_queue - restart transmit
1847 * @dev: network device
1848 *
1849 * Allow upper layers to call the device hard_start_xmit routine.
1850 * Used for flow control when transmit resources are available.
1851 */
79d16385
DM
1852static inline void netif_wake_queue(struct net_device *dev)
1853{
e8a0464c 1854 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1855}
1856
fd2ea0a7
DM
1857static inline void netif_tx_wake_all_queues(struct net_device *dev)
1858{
1859 unsigned int i;
1860
1861 for (i = 0; i < dev->num_tx_queues; i++) {
1862 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1863 netif_tx_wake_queue(txq);
1864 }
1865}
1866
d29f749e
DJ
1867static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1868{
18543a64 1869 if (WARN_ON(!dev_queue)) {
256ee435 1870 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
18543a64
GC
1871 return;
1872 }
73466498 1873 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1874}
1875
bea3348e
SH
1876/**
1877 * netif_stop_queue - stop the transmit queue
1878 * @dev: network device
1879 *
1880 * Stop upper layers from calling the device hard_start_xmit routine.
1881 * Used for flow control when transmit resources are unavailable.
1882 */
1da177e4
LT
1883static inline void netif_stop_queue(struct net_device *dev)
1884{
e8a0464c 1885 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1886}
1887
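/*
 * Example (illustrative driver fragment): classic transmit flow control.
 * The xmit routine stops the queue when its ring fills up, and the TX
 * completion path wakes it once descriptors have been reclaimed.
 * my_tx_ring_full() and my_tx_ring_has_room() are hypothetical helpers.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		...queue skb on the hardware ring...
 *		if (my_tx_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	In the TX completion handler:
 *
 *	if (netif_queue_stopped(dev) && my_tx_ring_has_room(dev))
 *		netif_wake_queue(dev);
 */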
fd2ea0a7
DM
1888static inline void netif_tx_stop_all_queues(struct net_device *dev)
1889{
1890 unsigned int i;
1891
1892 for (i = 0; i < dev->num_tx_queues; i++) {
1893 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1894 netif_tx_stop_queue(txq);
1895 }
1896}
1897
4d29515f 1898static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
d29f749e 1899{
73466498 1900 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1901}
1902
bea3348e
SH
1903/**
1904 * netif_queue_stopped - test if transmit queue is flow-blocked
1905 * @dev: network device
1906 *
1907 * Test if transmit queue on device is currently unable to send.
1908 */
4d29515f 1909static inline bool netif_queue_stopped(const struct net_device *dev)
1da177e4 1910{
e8a0464c 1911 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1912}
1913
4d29515f 1914static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
c3f26a26 1915{
73466498
TH
1916 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
1917}
1918
4d29515f 1919static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
73466498
TH
1920{
1921 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
1922}
1923
c5d67bd7
TH
1924static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
1925 unsigned int bytes)
1926{
114cf580
TH
1927#ifdef CONFIG_BQL
1928 dql_queued(&dev_queue->dql, bytes);
b37c0fbe
AD
1929
1930 if (likely(dql_avail(&dev_queue->dql) >= 0))
1931 return;
1932
1933 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
1934
1935 /*
1936 * The XOFF flag must be set before checking the dql_avail below,
1937 * because in netdev_tx_completed_queue we update the dql_completed
1938 * before checking the XOFF flag.
1939 */
1940 smp_mb();
1941
1942 /* check again in case another CPU has just made room avail */
1943 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
1944 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
114cf580 1945#endif
c5d67bd7
TH
1946}
1947
1948static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
1949{
1950 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
1951}
1952
1953static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
95c96174 1954 unsigned int pkts, unsigned int bytes)
c5d67bd7 1955{
114cf580 1956#ifdef CONFIG_BQL
b37c0fbe
AD
1957 if (unlikely(!bytes))
1958 return;
1959
1960 dql_completed(&dev_queue->dql, bytes);
1961
1962 /*
1963 * Without the memory barrier there is a small possibility that
1964 * netdev_tx_sent_queue will miss the update and cause the queue to
1965 * be stopped forever.
1966 */
1967 smp_mb();
1968
1969 if (dql_avail(&dev_queue->dql) < 0)
1970 return;
1971
1972 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
1973 netif_schedule_queue(dev_queue);
114cf580 1974#endif
c5d67bd7
TH
1975}
1976
1977static inline void netdev_completed_queue(struct net_device *dev,
95c96174 1978 unsigned int pkts, unsigned int bytes)
c5d67bd7
TH
1979{
1980 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
1981}
1982
1983static inline void netdev_tx_reset_queue(struct netdev_queue *q)
1984{
114cf580 1985#ifdef CONFIG_BQL
5c490354 1986 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
114cf580
TH
1987 dql_reset(&q->dql);
1988#endif
c5d67bd7
TH
1989}
1990
1991static inline void netdev_reset_queue(struct net_device *dev)
1992{
1993 netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
c3f26a26
DM
1994}
1995
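/*
 * Example (sketch): pairing the byte queue limit hooks.  The driver
 * reports bytes queued at transmit time and packets/bytes freed at
 * completion time; the stack then throttles the queue for it.
 *
 *	In ndo_start_xmit(), once the skb is on the ring:
 *		netdev_sent_queue(dev, skb->len);
 *
 *	In the TX completion handler, after reclaiming descriptors:
 *		netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 *	When the ring is torn down or reset:
 *		netdev_reset_queue(dev);
 */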
bea3348e
SH
1996/**
1997 * netif_running - test if up
1998 * @dev: network device
1999 *
2000 * Test if the device has been brought up.
2001 */
4d29515f 2002static inline bool netif_running(const struct net_device *dev)
1da177e4
LT
2003{
2004 return test_bit(__LINK_STATE_START, &dev->state);
2005}
2006
f25f4e44
PWJ
2007/*
2008 * Routines to manage the subqueues on a device.  We only need start,
2009 * stop, and a check if a subqueue is stopped.  All other device
2010 * management is done at the overall netdevice level.
2011 * There is also a test for whether the device is multiqueue.
2012 */
bea3348e
SH
2013
2014/**
2015 * netif_start_subqueue - allow sending packets on subqueue
2016 * @dev: network device
2017 * @queue_index: sub queue index
2018 *
2019 * Start individual transmit queue of a device with multiple transmit queues.
2020 */
f25f4e44
PWJ
2021static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2022{
fd2ea0a7 2023 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
7b3d3e4f
KK
2024
2025 netif_tx_start_queue(txq);
f25f4e44
PWJ
2026}
2027
bea3348e
SH
2028/**
2029 * netif_stop_subqueue - stop sending packets on subqueue
2030 * @dev: network device
2031 * @queue_index: sub queue index
2032 *
2033 * Stop individual transmit queue of a device with multiple transmit queues.
2034 */
f25f4e44
PWJ
2035static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2036{
fd2ea0a7 2037 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
f25f4e44
PWJ
2038#ifdef CONFIG_NETPOLL_TRAP
2039 if (netpoll_trap())
2040 return;
2041#endif
7b3d3e4f 2042 netif_tx_stop_queue(txq);
f25f4e44
PWJ
2043}
2044
bea3348e
SH
2045/**
2046 * netif_subqueue_stopped - test status of subqueue
2047 * @dev: network device
2048 * @queue_index: sub queue index
2049 *
2050 * Check individual transmit queue of a device with multiple transmit queues.
2051 */
4d29515f
DM
2052static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2053 u16 queue_index)
f25f4e44 2054{
fd2ea0a7 2055 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
7b3d3e4f
KK
2056
2057 return netif_tx_queue_stopped(txq);
f25f4e44
PWJ
2058}
2059
4d29515f
DM
2060static inline bool netif_subqueue_stopped(const struct net_device *dev,
2061 struct sk_buff *skb)
668f895a
PE
2062{
2063 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2064}
bea3348e
SH
2065
2066/**
2067 * netif_wake_subqueue - allow sending packets on subqueue
2068 * @dev: network device
2069 * @queue_index: sub queue index
2070 *
2071 * Resume individual transmit queue of a device with multiple transmit queues.
2072 */
f25f4e44
PWJ
2073static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2074{
fd2ea0a7 2075 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
f25f4e44
PWJ
2076#ifdef CONFIG_NETPOLL_TRAP
2077 if (netpoll_trap())
2078 return;
2079#endif
73466498 2080 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
37437bb2 2081 __netif_schedule(txq->qdisc);
f25f4e44
PWJ
2082}
2083
a3d22a68
VZ
2084/*
2085 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2086 * as a distribution range limit for the returned value.
2087 */
2088static inline u16 skb_tx_hash(const struct net_device *dev,
2089 const struct sk_buff *skb)
2090{
2091 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2092}
2093
bea3348e
SH
2094/**
2095 * netif_is_multiqueue - test if device has multiple transmit queues
2096 * @dev: network device
2097 *
2098 * Check if device has multiple transmit queues
bea3348e 2099 */
4d29515f 2100static inline bool netif_is_multiqueue(const struct net_device *dev)
f25f4e44 2101{
a02cec21 2102 return dev->num_tx_queues > 1;
f25f4e44 2103}
1da177e4 2104
e6484930
TH
2105extern int netif_set_real_num_tx_queues(struct net_device *dev,
2106 unsigned int txq);
f0796d5c 2107
62fe0b40
BH
2108#ifdef CONFIG_RPS
2109extern int netif_set_real_num_rx_queues(struct net_device *dev,
2110 unsigned int rxq);
2111#else
2112static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2113 unsigned int rxq)
2114{
2115 return 0;
2116}
2117#endif
2118
3171d026
BH
2119static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2120 const struct net_device *from_dev)
2121{
ee6ae1a1
JP
2122 int err;
2123
2124 err = netif_set_real_num_tx_queues(to_dev,
2125 from_dev->real_num_tx_queues);
2126 if (err)
2127 return err;
3171d026
BH
2128#ifdef CONFIG_RPS
2129 return netif_set_real_num_rx_queues(to_dev,
2130 from_dev->real_num_rx_queues);
2131#else
2132 return 0;
2133#endif
2134}
2135
16917b87
YM
2136#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2137extern int netif_get_num_default_rss_queues(void);
2138
1da177e4 2139/* Use this variant when it is known for sure that it
0ef47309
ML
2140 * is executing from hardware interrupt context or with hardware interrupts
2141 * disabled.
1da177e4 2142 */
bea3348e 2143extern void dev_kfree_skb_irq(struct sk_buff *skb);
1da177e4
LT
2144
2145/* Use this variant in places where it could be invoked
0ef47309
ML
2146 * from either hardware interrupt or other context, with hardware interrupts
2147 * either disabled or enabled.
1da177e4 2148 */
56079431 2149extern void dev_kfree_skb_any(struct sk_buff *skb);
1da177e4 2150
1da177e4
LT
2151extern int netif_rx(struct sk_buff *skb);
2152extern int netif_rx_ni(struct sk_buff *skb);
1da177e4 2153extern int netif_receive_skb(struct sk_buff *skb);
5b252f0c 2154extern gro_result_t dev_gro_receive(struct napi_struct *napi,
96e93eab 2155 struct sk_buff *skb);
c7c4b3b6
BH
2156extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
2157extern gro_result_t napi_gro_receive(struct napi_struct *napi,
d565b0a1 2158 struct sk_buff *skb);
86cac58b 2159extern void napi_gro_flush(struct napi_struct *napi);
76620aaf 2160extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
c7c4b3b6 2161extern gro_result_t napi_frags_finish(struct napi_struct *napi,
5b252f0c
BH
2162 struct sk_buff *skb,
2163 gro_result_t ret);
c7c4b3b6 2164extern gro_result_t napi_gro_frags(struct napi_struct *napi);
76620aaf
HX
2165
2166static inline void napi_free_frags(struct napi_struct *napi)
2167{
2168 kfree_skb(napi->skb);
2169 napi->skb = NULL;
2170}
2171
ab95bfe0 2172extern int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
2173 rx_handler_func_t *rx_handler,
2174 void *rx_handler_data);
ab95bfe0
JP
2175extern void netdev_rx_handler_unregister(struct net_device *dev);
2176
95f050bf 2177extern bool dev_valid_name(const char *name);
881d966b
EB
2178extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2179extern int dev_ethtool(struct net *net, struct ifreq *);
95c96174 2180extern unsigned int dev_get_flags(const struct net_device *);
bd380811 2181extern int __dev_change_flags(struct net_device *, unsigned int flags);
95c96174 2182extern int dev_change_flags(struct net_device *, unsigned int);
bd380811 2183extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
cf04a4c7 2184extern int dev_change_name(struct net_device *, const char *);
0b815a1a 2185extern int dev_set_alias(struct net_device *, const char *, size_t);
ce286d32
EB
2186extern int dev_change_net_namespace(struct net_device *,
2187 struct net *, const char *);
1da177e4 2188extern int dev_set_mtu(struct net_device *, int);
cbda10fa 2189extern void dev_set_group(struct net_device *, int);
1da177e4
LT
2190extern int dev_set_mac_address(struct net_device *,
2191 struct sockaddr *);
f6a78bfc 2192extern int dev_hard_start_xmit(struct sk_buff *skb,
fd2ea0a7
DM
2193 struct net_device *dev,
2194 struct netdev_queue *txq);
44540960
AB
2195extern int dev_forward_skb(struct net_device *dev,
2196 struct sk_buff *skb);
1da177e4 2197
20380731 2198extern int netdev_budget;
1da177e4
LT
2199
2200/* Called by rtnetlink.c:rtnl_unlock() */
2201extern void netdev_run_todo(void);
2202
bea3348e
SH
2203/**
2204 * dev_put - release reference to device
2205 * @dev: network device
2206 *
9ef4429b 2207 * Release reference to device to allow it to be freed.
bea3348e 2208 */
1da177e4
LT
2209static inline void dev_put(struct net_device *dev)
2210{
933393f5 2211 this_cpu_dec(*dev->pcpu_refcnt);
1da177e4
LT
2212}
2213
bea3348e
SH
2214/**
2215 * dev_hold - get reference to device
2216 * @dev: network device
2217 *
9ef4429b 2218 * Hold reference to device to keep it from being freed.
bea3348e 2219 */
15333061
SH
2220static inline void dev_hold(struct net_device *dev)
2221{
933393f5 2222 this_cpu_inc(*dev->pcpu_refcnt);
15333061 2223}
1da177e4
LT
2224
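/*
 * Example (illustrative): pinning a device across a sleeping section.
 * Note that dev_hold()/dev_put() only keep the struct net_device memory
 * alive; they do not prevent the device from being unregistered.
 *
 *	dev_hold(dev);
 *	...use dev, possibly sleeping...
 *	dev_put(dev);
 */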
2225/* Carrier loss detection, dial on demand. The functions netif_carrier_on
2226 * and _off may be called from IRQ context, but it is the caller
2227 * who is responsible for serializing these calls.
b00055aa
SR
2228 *
2229 * The name carrier is inappropriate; these functions should really be
2230 * called netif_lowerlayer_*() because they represent the state of any
2231 * kind of lower layer, not just hardware media.
1da177e4
LT
2232 */
2233
8f4cccbb 2234extern void linkwatch_init_dev(struct net_device *dev);
1da177e4 2235extern void linkwatch_fire_event(struct net_device *dev);
e014debe 2236extern void linkwatch_forget_dev(struct net_device *dev);
1da177e4 2237
bea3348e
SH
2238/**
2239 * netif_carrier_ok - test if carrier present
2240 * @dev: network device
2241 *
2242 * Check if carrier is present on device
2243 */
4d29515f 2244static inline bool netif_carrier_ok(const struct net_device *dev)
1da177e4
LT
2245{
2246 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2247}
2248
9d21493b
ED
2249extern unsigned long dev_trans_start(struct net_device *dev);
2250
1da177e4
LT
2251extern void __netdev_watchdog_up(struct net_device *dev);
2252
0a242efc 2253extern void netif_carrier_on(struct net_device *dev);
1da177e4 2254
0a242efc 2255extern void netif_carrier_off(struct net_device *dev);
1da177e4 2256
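/*
 * Example (illustrative): a driver's link-state interrupt usually just
 * forwards what the PHY reports; "link_up" is a hypothetical flag.
 *
 *	if (link_up)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */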
bea3348e
SH
2257/**
2258 * netif_dormant_on - mark device as dormant.
2259 * @dev: network device
2260 *
2261 * Mark device as dormant (as per RFC2863).
2262 *
2263 * The dormant state indicates that the relevant interface is not
2264 * actually in a condition to pass packets (i.e., it is not 'up') but is
2265 * in a "pending" state, waiting for some external event. For "on-
2266 * demand" interfaces, this new state identifies the situation where the
2267 * interface is waiting for events to place it in the up state.
2268 *
2269 */
b00055aa
SR
2270static inline void netif_dormant_on(struct net_device *dev)
2271{
2272 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2273 linkwatch_fire_event(dev);
2274}
2275
bea3348e
SH
2276/**
2277 * netif_dormant_off - set device as not dormant.
2278 * @dev: network device
2279 *
2280 * Device is not in dormant state.
2281 */
b00055aa
SR
2282static inline void netif_dormant_off(struct net_device *dev)
2283{
2284 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2285 linkwatch_fire_event(dev);
2286}
2287
bea3348e
SH
2288/**
2289 * netif_dormant - test if device is dormant
2290 * @dev: network device
2291 *
2292 * Check if the device is in the RFC2863 dormant state.
2293 */
4d29515f 2294static inline bool netif_dormant(const struct net_device *dev)
b00055aa
SR
2295{
2296 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2297}
2298
2299
bea3348e
SH
2300/**
2301 * netif_oper_up - test if device is operational
2302 * @dev: network device
2303 *
2304 * Check if the device's RFC2863 operational state is up.
2305 */
4d29515f 2306static inline bool netif_oper_up(const struct net_device *dev)
d94d9fee 2307{
b00055aa
SR
2308 return (dev->operstate == IF_OPER_UP ||
2309 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2310}
2311
bea3348e
SH
2312/**
2313 * netif_device_present - is device available or removed
2314 * @dev: network device
2315 *
2316 * Check if device has not been removed from system.
2317 */
4d29515f 2318static inline bool netif_device_present(struct net_device *dev)
1da177e4
LT
2319{
2320 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2321}
2322
56079431 2323extern void netif_device_detach(struct net_device *dev);
1da177e4 2324
56079431 2325extern void netif_device_attach(struct net_device *dev);
1da177e4
LT
2326
2327/*
2328 * Network interface message level settings
2329 */
1da177e4
LT
2330
2331enum {
2332 NETIF_MSG_DRV = 0x0001,
2333 NETIF_MSG_PROBE = 0x0002,
2334 NETIF_MSG_LINK = 0x0004,
2335 NETIF_MSG_TIMER = 0x0008,
2336 NETIF_MSG_IFDOWN = 0x0010,
2337 NETIF_MSG_IFUP = 0x0020,
2338 NETIF_MSG_RX_ERR = 0x0040,
2339 NETIF_MSG_TX_ERR = 0x0080,
2340 NETIF_MSG_TX_QUEUED = 0x0100,
2341 NETIF_MSG_INTR = 0x0200,
2342 NETIF_MSG_TX_DONE = 0x0400,
2343 NETIF_MSG_RX_STATUS = 0x0800,
2344 NETIF_MSG_PKTDATA = 0x1000,
2345 NETIF_MSG_HW = 0x2000,
2346 NETIF_MSG_WOL = 0x4000,
2347};
2348
2349#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2350#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2351#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2352#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2353#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2354#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2355#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2356#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2357#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2358#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2359#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2360#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2361#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2362#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2363#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2364
2365static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2366{
2367 /* use default */
2368 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2369 return default_msg_enable_bits;
2370 if (debug_value == 0) /* no output */
2371 return 0;
2372 /* set low N bits */
2373 return (1 << debug_value) - 1;
2374}
2375
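/*
 * Example (typical usage, with a hypothetical "debug" module parameter):
 * seed msg_enable once at probe time, then gate output through the
 * netif_msg_*() tests or the netif_*() printk wrappers below.
 *
 *	static int debug = -1;	(-1 selects default_msg_enable_bits)
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_info(priv, link, dev, "link up\n");
 */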
c773e847 2376static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
932ff279 2377{
c773e847
DM
2378 spin_lock(&txq->_xmit_lock);
2379 txq->xmit_lock_owner = cpu;
22dd7495
JHS
2380}
2381
fd2ea0a7
DM
2382static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2383{
2384 spin_lock_bh(&txq->_xmit_lock);
2385 txq->xmit_lock_owner = smp_processor_id();
2386}
2387
4d29515f 2388static inline bool __netif_tx_trylock(struct netdev_queue *txq)
c3f26a26 2389{
4d29515f 2390 bool ok = spin_trylock(&txq->_xmit_lock);
c3f26a26
DM
2391 if (likely(ok))
2392 txq->xmit_lock_owner = smp_processor_id();
2393 return ok;
2394}
2395
2396static inline void __netif_tx_unlock(struct netdev_queue *txq)
2397{
2398 txq->xmit_lock_owner = -1;
2399 spin_unlock(&txq->_xmit_lock);
2400}
2401
2402static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2403{
2404 txq->xmit_lock_owner = -1;
2405 spin_unlock_bh(&txq->_xmit_lock);
2406}
2407
08baf561
ED
2408static inline void txq_trans_update(struct netdev_queue *txq)
2409{
2410 if (txq->xmit_lock_owner != -1)
2411 txq->trans_start = jiffies;
2412}
2413
d29f749e
DJ
2414/**
2415 * netif_tx_lock - grab network device transmit lock
2416 * @dev: network device
d29f749e
DJ
2417 *
2418 * Get network device transmit lock
2419 */
22dd7495
JHS
2420static inline void netif_tx_lock(struct net_device *dev)
2421{
e8a0464c 2422 unsigned int i;
c3f26a26 2423 int cpu;
c773e847 2424
c3f26a26
DM
2425 spin_lock(&dev->tx_global_lock);
2426 cpu = smp_processor_id();
e8a0464c
DM
2427 for (i = 0; i < dev->num_tx_queues; i++) {
2428 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c3f26a26
DM
2429
2430 /* We are the only thread of execution doing a
2431 * freeze, but we have to grab the _xmit_lock in
2432 * order to synchronize with threads which are in
2433 * the ->hard_start_xmit() handler and already
2434 * checked the frozen bit.
2435 */
e8a0464c 2436 __netif_tx_lock(txq, cpu);
c3f26a26
DM
2437 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2438 __netif_tx_unlock(txq);
e8a0464c 2439 }
932ff279
HX
2440}
2441
2442static inline void netif_tx_lock_bh(struct net_device *dev)
2443{
e8a0464c
DM
2444 local_bh_disable();
2445 netif_tx_lock(dev);
932ff279
HX
2446}
2447
932ff279
HX
2448static inline void netif_tx_unlock(struct net_device *dev)
2449{
e8a0464c
DM
2450 unsigned int i;
2451
2452 for (i = 0; i < dev->num_tx_queues; i++) {
2453 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c773e847 2454
c3f26a26
DM
2455 /* No need to grab the _xmit_lock here. If the
2456 * queue is not stopped for another reason, we
2457 * force a schedule.
2458 */
2459 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
7b3d3e4f 2460 netif_schedule_queue(txq);
c3f26a26
DM
2461 }
2462 spin_unlock(&dev->tx_global_lock);
932ff279
HX
2463}
2464
2465static inline void netif_tx_unlock_bh(struct net_device *dev)
2466{
e8a0464c
DM
2467 netif_tx_unlock(dev);
2468 local_bh_enable();
932ff279
HX
2469}
2470
c773e847 2471#define HARD_TX_LOCK(dev, txq, cpu) { \
22dd7495 2472 if ((dev->features & NETIF_F_LLTX) == 0) { \
c773e847 2473 __netif_tx_lock(txq, cpu); \
22dd7495
JHS
2474 } \
2475}
2476
c773e847 2477#define HARD_TX_UNLOCK(dev, txq) { \
22dd7495 2478 if ((dev->features & NETIF_F_LLTX) == 0) { \
c773e847 2479 __netif_tx_unlock(txq); \
22dd7495
JHS
2480 } \
2481}
2482
1da177e4
LT
2483static inline void netif_tx_disable(struct net_device *dev)
2484{
fd2ea0a7 2485 unsigned int i;
c3f26a26 2486 int cpu;
fd2ea0a7 2487
c3f26a26
DM
2488 local_bh_disable();
2489 cpu = smp_processor_id();
fd2ea0a7
DM
2490 for (i = 0; i < dev->num_tx_queues; i++) {
2491 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c3f26a26
DM
2492
2493 __netif_tx_lock(txq, cpu);
fd2ea0a7 2494 netif_tx_stop_queue(txq);
c3f26a26 2495 __netif_tx_unlock(txq);
fd2ea0a7 2496 }
c3f26a26 2497 local_bh_enable();
1da177e4
LT
2498}
2499
e308a5d8
DM
2500static inline void netif_addr_lock(struct net_device *dev)
2501{
2502 spin_lock(&dev->addr_list_lock);
2503}
2504
2429f7ac
JP
2505static inline void netif_addr_lock_nested(struct net_device *dev)
2506{
2507 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2508}
2509
e308a5d8
DM
2510static inline void netif_addr_lock_bh(struct net_device *dev)
2511{
2512 spin_lock_bh(&dev->addr_list_lock);
2513}
2514
2515static inline void netif_addr_unlock(struct net_device *dev)
2516{
2517 spin_unlock(&dev->addr_list_lock);
2518}
2519
2520static inline void netif_addr_unlock_bh(struct net_device *dev)
2521{
2522 spin_unlock_bh(&dev->addr_list_lock);
2523}
2524
f001fde5 2525/*
31278e71 2526 * dev_addrs walker. Should be used only for read access. Call with
f001fde5
JP
2527 * rcu_read_lock held.
2528 */
2529#define for_each_dev_addr(dev, ha) \
31278e71 2530 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
f001fde5 2531
1da177e4
LT
2532/* These functions live elsewhere (drivers/net/net_init.c, but related) */
2533
2534extern void ether_setup(struct net_device *dev);
2535
2536/* Support for loadable net-drivers */
36909ea4 2537extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
f25f4e44 2538 void (*setup)(struct net_device *),
36909ea4 2539 unsigned int txqs, unsigned int rxqs);
f25f4e44 2540#define alloc_netdev(sizeof_priv, name, setup) \
36909ea4
TH
2541 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2542
2543#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2544 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2545
1da177e4
LT
2546extern int register_netdev(struct net_device *dev);
2547extern void unregister_netdev(struct net_device *dev);
f001fde5 2548
22bedad3
JP
2549/* General hardware address lists handling functions */
2550extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2551 struct netdev_hw_addr_list *from_list,
2552 int addr_len, unsigned char addr_type);
2553extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2554 struct netdev_hw_addr_list *from_list,
2555 int addr_len, unsigned char addr_type);
2556extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2557 struct netdev_hw_addr_list *from_list,
2558 int addr_len);
2559extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2560 struct netdev_hw_addr_list *from_list,
2561 int addr_len);
2562extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2563extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2564
f001fde5 2565/* Functions used for device addresses handling */
6b6e2725 2566extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
f001fde5 2567 unsigned char addr_type);
6b6e2725 2568extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
f001fde5
JP
2569 unsigned char addr_type);
2570extern int dev_addr_add_multiple(struct net_device *to_dev,
2571 struct net_device *from_dev,
2572 unsigned char addr_type);
2573extern int dev_addr_del_multiple(struct net_device *to_dev,
2574 struct net_device *from_dev,
2575 unsigned char addr_type);
a748ee24
JP
2576extern void dev_addr_flush(struct net_device *dev);
2577extern int dev_addr_init(struct net_device *dev);
2578
2579/* Functions used for unicast addresses handling */
6b6e2725 2580extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2581extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2582extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
a748ee24
JP
2583extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2584extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2585extern void dev_uc_flush(struct net_device *dev);
2586extern void dev_uc_init(struct net_device *dev);
f001fde5 2587
22bedad3 2588/* Functions used for multicast addresses handling */
6b6e2725 2589extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2590extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2591extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2592extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2593extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
22bedad3
JP
2594extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2595extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2596extern void dev_mc_flush(struct net_device *dev);
2597extern void dev_mc_init(struct net_device *dev);
f001fde5 2598
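/*
 * Example (sketch): joining and leaving a hardware multicast group from
 * driver code; "addr" is a hypothetical 6-byte group address.
 *
 *	dev_mc_add(dev, addr);
 *	...
 *	dev_mc_del(dev, addr);
 */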
4417da66
PM
2599/* Functions used for secondary unicast and multicast support */
2600extern void dev_set_rx_mode(struct net_device *dev);
2601extern void __dev_set_rx_mode(struct net_device *dev);
dad9b335
WC
2602extern int dev_set_promiscuity(struct net_device *dev, int inc);
2603extern int dev_set_allmulti(struct net_device *dev, int inc);
1da177e4 2604extern void netdev_state_change(struct net_device *dev);
ee89bab1 2605extern void netdev_notify_peers(struct net_device *dev);
d8a33ac4 2606extern void netdev_features_change(struct net_device *dev);
1da177e4 2607/* Load a device via the kmod */
881d966b 2608extern void dev_load(struct net *net, const char *name);
1da177e4 2609extern void dev_mcast_init(void);
d7753516
BH
2610extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2611 struct rtnl_link_stats64 *storage);
77a1abf5
ED
2612extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2613 const struct net_device_stats *netdev_stats);
eeda3fd6 2614
1da177e4 2615extern int netdev_max_backlog;
3b098e2d 2616extern int netdev_tstamp_prequeue;
1da177e4 2617extern int weight_p;
0a14842f 2618extern int bpf_jit_enable;
1da177e4 2619extern int netdev_set_master(struct net_device *dev, struct net_device *master);
1765a575
JP
2620extern int netdev_set_bond_master(struct net_device *dev,
2621 struct net_device *master);
84fa7933 2622extern int skb_checksum_help(struct sk_buff *skb);
c8f44aff
MM
2623extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2624 netdev_features_t features);
fb286bb2
HX
2625#ifdef CONFIG_BUG
2626extern void netdev_rx_csum_fault(struct net_device *dev);
2627#else
2628static inline void netdev_rx_csum_fault(struct net_device *dev)
2629{
2630}
2631#endif
1da177e4
LT
2632/* rx skb timestamps */
2633extern void net_enable_timestamp(void);
2634extern void net_disable_timestamp(void);
2635
20380731
ACM
2636#ifdef CONFIG_PROC_FS
2637extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2638extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2639extern void dev_seq_stop(struct seq_file *seq, void *v);
2640#endif
2641
b8a9787e
JV
2642extern int netdev_class_create_file(struct class_attribute *class_attr);
2643extern void netdev_class_remove_file(struct class_attribute *class_attr);
2644
04600794
JB
2645extern struct kobj_ns_type_operations net_ns_type_operations;
2646
3019de12 2647extern const char *netdev_drivername(const struct net_device *dev);
6579e57b 2648
20380731
ACM
2649extern void linkwatch_run_queue(void);
2650
c8f44aff
MM
2651static inline netdev_features_t netdev_get_wanted_features(
2652 struct net_device *dev)
5455c699
MM
2653{
2654 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2655}
c8f44aff
MM
2656netdev_features_t netdev_increment_features(netdev_features_t all,
2657 netdev_features_t one, netdev_features_t mask);
6cb6a27c 2658int __netdev_update_features(struct net_device *dev);
5455c699 2659void netdev_update_features(struct net_device *dev);
afe12cc8 2660void netdev_change_features(struct net_device *dev);
7f353bf2 2661
fc4a7489
PM
2662void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2663 struct net_device *dev);
2664
c8f44aff 2665netdev_features_t netif_skb_features(struct sk_buff *skb);
58e998c6 2666
4d29515f 2667static inline bool net_gso_ok(netdev_features_t features, int gso_type)
576a30eb 2668{
c8f44aff 2669 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
0345e186
MM
2670
2671 /* check flags correspondence */
2672 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2673 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2674 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2675 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2676 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2677 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2678
d6b4991a 2679 return (features & feature) == feature;
576a30eb
HX
2680}
2681
4d29515f 2682static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
bcd76111 2683{
278b2513 2684 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
21dc3301 2685 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
bcd76111
HX
2686}
2687
4d29515f
DM
2688static inline bool netif_needs_gso(struct sk_buff *skb,
2689 netdev_features_t features)
7967168c 2690{
fc741216 2691 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
cdbee74c
YZ
2692 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
2693 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
7967168c
HX
2694}
2695
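/*
 * Example (sketch of the software fallback done in the core transmit
 * path): when the device cannot handle a GSO skb, it is segmented
 * before being handed to the driver.
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		...transmit each segment in turn...
 *	}
 */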
82cc1a7a
PWJ
2696static inline void netif_set_gso_max_size(struct net_device *dev,
2697 unsigned int size)
2698{
2699 dev->gso_max_size = size;
2700}
2701
4d29515f 2702static inline bool netif_is_bond_slave(struct net_device *dev)
1765a575
JP
2703{
2704 return (dev->flags & IFF_SLAVE) && (dev->priv_flags & IFF_BONDING);
2705}
2706
3bdc0eba
BG
2707static inline bool netif_supports_nofcs(struct net_device *dev)
2708{
2709 return dev->priv_flags & IFF_SUPP_NOFCS;
2710}
2711
505d4f73 2712extern struct pernet_operations __net_initdata loopback_net_ops;
b1b67dd4 2713
571ba423
JP
2714/* Logging, debugging and troubleshooting/diagnostic helpers. */
2715
2716/* netdev_printk helpers, similar to dev_printk */
2717
2718static inline const char *netdev_name(const struct net_device *dev)
2719{
2720 if (dev->reg_state != NETREG_REGISTERED)
2721 return "(unregistered net_device)";
2722 return dev->name;
2723}
2724
b9075fa9
JP
2725extern __printf(3, 4)
2726int netdev_printk(const char *level, const struct net_device *dev,
2727 const char *format, ...);
2728extern __printf(2, 3)
2729int netdev_emerg(const struct net_device *dev, const char *format, ...);
2730extern __printf(2, 3)
2731int netdev_alert(const struct net_device *dev, const char *format, ...);
2732extern __printf(2, 3)
2733int netdev_crit(const struct net_device *dev, const char *format, ...);
2734extern __printf(2, 3)
2735int netdev_err(const struct net_device *dev, const char *format, ...);
2736extern __printf(2, 3)
2737int netdev_warn(const struct net_device *dev, const char *format, ...);
2738extern __printf(2, 3)
2739int netdev_notice(const struct net_device *dev, const char *format, ...);
2740extern __printf(2, 3)
2741int netdev_info(const struct net_device *dev, const char *format, ...);
571ba423 2742
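/*
 * Example (illustrative): these helpers take the device, so messages are
 * automatically prefixed with the driver and interface name.
 *
 *	netdev_err(dev, "DMA mapping failed\n");
 *	netdev_dbg(dev, "refilled rx queue %d\n", i);
 */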
8909c9ad
VK
2743#define MODULE_ALIAS_NETDEV(device) \
2744 MODULE_ALIAS("netdev-" device)
2745
b558c96f 2746#if defined(CONFIG_DYNAMIC_DEBUG)
571ba423
JP
2747#define netdev_dbg(__dev, format, args...) \
2748do { \
ffa10cb4 2749 dynamic_netdev_dbg(__dev, format, ##args); \
571ba423 2750} while (0)
b558c96f
JC
2751#elif defined(DEBUG)
2752#define netdev_dbg(__dev, format, args...) \
2753 netdev_printk(KERN_DEBUG, __dev, format, ##args)
571ba423
JP
2754#else
2755#define netdev_dbg(__dev, format, args...) \
2756({ \
2757 if (0) \
2758 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2759 0; \
2760})
2761#endif
2762
2763#if defined(VERBOSE_DEBUG)
2764#define netdev_vdbg netdev_dbg
2765#else
2766
2767#define netdev_vdbg(dev, format, args...) \
2768({ \
2769 if (0) \
2770 netdev_printk(KERN_DEBUG, dev, format, ##args); \
2771 0; \
2772})
2773#endif
2774
2775/*
2776 * netdev_WARN() acts like dev_printk(), but with the key difference
2777 * of using a WARN/WARN_ON to get the message out, including the
2778 * file/line information and a backtrace.
2779 */
2780#define netdev_WARN(dev, format, args...) \
2781 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
2782
b3d95c5c
JP
2783/* netif printk helpers, similar to netdev_printk */
2784
2785#define netif_printk(priv, type, level, dev, fmt, args...) \
2786do { \
2787 if (netif_msg_##type(priv)) \
2788 netdev_printk(level, (dev), fmt, ##args); \
2789} while (0)
2790
f45f4321
JP
2791#define netif_level(level, priv, type, dev, fmt, args...) \
2792do { \
2793 if (netif_msg_##type(priv)) \
2794 netdev_##level(dev, fmt, ##args); \
2795} while (0)
2796
b3d95c5c 2797#define netif_emerg(priv, type, dev, fmt, args...) \
f45f4321 2798 netif_level(emerg, priv, type, dev, fmt, ##args)
b3d95c5c 2799#define netif_alert(priv, type, dev, fmt, args...) \
f45f4321 2800 netif_level(alert, priv, type, dev, fmt, ##args)
b3d95c5c 2801#define netif_crit(priv, type, dev, fmt, args...) \
f45f4321 2802 netif_level(crit, priv, type, dev, fmt, ##args)
b3d95c5c 2803#define netif_err(priv, type, dev, fmt, args...) \
f45f4321 2804 netif_level(err, priv, type, dev, fmt, ##args)
b3d95c5c 2805#define netif_warn(priv, type, dev, fmt, args...) \
f45f4321 2806 netif_level(warn, priv, type, dev, fmt, ##args)
b3d95c5c 2807#define netif_notice(priv, type, dev, fmt, args...) \
f45f4321 2808 netif_level(notice, priv, type, dev, fmt, ##args)
b3d95c5c 2809#define netif_info(priv, type, dev, fmt, args...) \
f45f4321 2810 netif_level(info, priv, type, dev, fmt, ##args)
b3d95c5c 2811
0053ea9c 2812#if defined(CONFIG_DYNAMIC_DEBUG)
b3d95c5c
JP
2813#define netif_dbg(priv, type, netdev, format, args...) \
2814do { \
2815 if (netif_msg_##type(priv)) \
b5fb0a03 2816 dynamic_netdev_dbg(netdev, format, ##args); \
b3d95c5c 2817} while (0)
0053ea9c
JP
2818#elif defined(DEBUG)
2819#define netif_dbg(priv, type, dev, format, args...) \
2820 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
b3d95c5c
JP
2821#else
2822#define netif_dbg(priv, type, dev, format, args...) \
2823({ \
2824 if (0) \
2825 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2826 0; \
2827})
2828#endif
2829
2830#if defined(VERBOSE_DEBUG)
bcfcc450 2831#define netif_vdbg netif_dbg
b3d95c5c
JP
2832#else
2833#define netif_vdbg(priv, type, dev, format, args...) \
2834({ \
2835 if (0) \
a4ed89cb 2836 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
b3d95c5c
JP
2837 0; \
2838})
2839#endif
571ba423 2840
1da177e4
LT
2841#endif /* __KERNEL__ */
2842
385a154c 2843#endif /* _LINUX_NETDEVICE_H */