BUG: headers with BUG/BUG_ON etc. need linux/bug.h
[deliverable/linux.git] / include / linux / netdevice.h
CommitLineData
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
02c30a84 10 * Authors: Ross Biro
1da177e4
LT
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
113aa838 14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
1da177e4
LT
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H
27
28#include <linux/if.h>
29#include <linux/if_ether.h>
30#include <linux/if_packet.h>
95c26df8 31#include <linux/if_link.h>
1da177e4
LT
32
33#ifdef __KERNEL__
e8db0be1 34#include <linux/pm_qos.h>
d7fe0f24 35#include <linux/timer.h>
187f1882 36#include <linux/bug.h>
bea3348e 37#include <linux/delay.h>
60063497 38#include <linux/atomic.h>
1da177e4
LT
39#include <asm/cache.h>
40#include <asm/byteorder.h>
41
1da177e4
LT
42#include <linux/device.h>
43#include <linux/percpu.h>
4d5b78c0 44#include <linux/rculist.h>
db217334 45#include <linux/dmaengine.h>
bea3348e 46#include <linux/workqueue.h>
114cf580 47#include <linux/dynamic_queue_limits.h>
1da177e4 48
b1b67dd4 49#include <linux/ethtool.h>
a050c33f 50#include <net/net_namespace.h>
cf85d08f 51#include <net/dsa.h>
7a6b6f51 52#ifdef CONFIG_DCB
2f90b865
AD
53#include <net/dcbnl.h>
54#endif
5bc1421e 55#include <net/netprio_cgroup.h>
a050c33f 56
a59e2ecb
MM
57#include <linux/netdev_features.h>
58
115c1d6e 59struct netpoll_info;
c1f19b51 60struct phy_device;
704232c2
JB
61/* 802.11 specific */
62struct wireless_dev;
1da177e4
LT
63 /* source back-compat hooks */
64#define SET_ETHTOOL_OPS(netdev,ops) \
65 ( (netdev)->ethtool_ops = (ops) )
66
c1f79426
SA
67/* hardware address assignment types */
68#define NET_ADDR_PERM 0 /* address is permanent (default) */
69#define NET_ADDR_RANDOM 1 /* address is generated randomly */
70#define NET_ADDR_STOLEN 2 /* address is stolen from other device */
71
9a1654ba
JP
72/* Backlog congestion levels */
73#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
74#define NET_RX_DROP 1 /* packet dropped */
75
572a9d7b
PM
76/*
77 * Transmit return codes: transmit return codes originate from three different
78 * namespaces:
79 *
80 * - qdisc return codes
81 * - driver transmit return codes
82 * - errno values
83 *
84 * Drivers are allowed to return any one of those in their hard_start_xmit()
85 * function. Real network devices commonly used with qdiscs should only return
86 * the driver transmit return codes though - when qdiscs are used, the actual
87 * transmission happens asynchronously, so the value is not propagated to
88 * higher layers. Virtual network devices transmit synchronously, in this case
89 * the driver transmit return codes are consumed by dev_queue_xmit(), all
90 * others are propagated to higher layers.
91 */
92
93/* qdisc ->enqueue() return codes. */
94#define NET_XMIT_SUCCESS 0x00
9a1654ba
JP
95#define NET_XMIT_DROP 0x01 /* skb dropped */
96#define NET_XMIT_CN 0x02 /* congestion notification */
97#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
98#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
1da177e4 99
b9df3cb8
GR
100/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
101 * indicates that the device will soon be dropping packets, or already drops
102 * some packets of the same priority; prompting us to send less aggressively. */
572a9d7b 103#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
1da177e4
LT
104#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
105
dc1f8bf6 106/* Driver transmit return codes */
9a1654ba 107#define NETDEV_TX_MASK 0xf0
572a9d7b 108
dc1f8bf6 109enum netdev_tx {
572a9d7b 110 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
9a1654ba
JP
111 NETDEV_TX_OK = 0x00, /* driver took care of packet */
112 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
113 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
dc1f8bf6
SH
114};
115typedef enum netdev_tx netdev_tx_t;
116
9a1654ba
JP
117/*
118 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
119 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
120 */
121static inline bool dev_xmit_complete(int rc)
122{
123 /*
124 * Positive cases with an skb consumed by a driver:
125 * - successful transmission (rc == NETDEV_TX_OK)
126 * - error while transmitting (rc < 0)
127 * - error while queueing to a different device (rc & NET_XMIT_MASK)
128 */
129 if (likely(rc < NET_XMIT_MASK))
130 return true;
131
132 return false;
133}
134
1da177e4
LT
135#endif
136
137#define MAX_ADDR_LEN 32 /* Largest hardware address length */
138
23b41168
VD
139/* Initial net device group. All devices belong to group 0 by default. */
140#define INIT_NETDEV_GROUP 0
141
c88e6f51 142#ifdef __KERNEL__
1da177e4
LT
143/*
144 * Compute the worst case header length according to the protocols
145 * used.
146 */
fe2918b0 147
d11ead75 148#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
8388e3da
DM
149# if defined(CONFIG_MAC80211_MESH)
150# define LL_MAX_HEADER 128
151# else
152# define LL_MAX_HEADER 96
153# endif
d11ead75 154#elif IS_ENABLED(CONFIG_TR)
8388e3da 155# define LL_MAX_HEADER 48
1da177e4 156#else
8388e3da 157# define LL_MAX_HEADER 32
1da177e4
LT
158#endif
159
d11ead75
BH
160#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
161 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
1da177e4
LT
162#define MAX_HEADER LL_MAX_HEADER
163#else
164#define MAX_HEADER (LL_MAX_HEADER + 48)
165#endif
166
167/*
be1f3c2c
BH
168 * Old network device statistics. Fields are native words
169 * (unsigned long) so they can be read and written atomically.
1da177e4 170 */
fe2918b0 171
d94d9fee 172struct net_device_stats {
3cfde79c
BH
173 unsigned long rx_packets;
174 unsigned long tx_packets;
175 unsigned long rx_bytes;
176 unsigned long tx_bytes;
177 unsigned long rx_errors;
178 unsigned long tx_errors;
179 unsigned long rx_dropped;
180 unsigned long tx_dropped;
181 unsigned long multicast;
1da177e4 182 unsigned long collisions;
1da177e4 183 unsigned long rx_length_errors;
3cfde79c
BH
184 unsigned long rx_over_errors;
185 unsigned long rx_crc_errors;
186 unsigned long rx_frame_errors;
187 unsigned long rx_fifo_errors;
188 unsigned long rx_missed_errors;
1da177e4
LT
189 unsigned long tx_aborted_errors;
190 unsigned long tx_carrier_errors;
191 unsigned long tx_fifo_errors;
192 unsigned long tx_heartbeat_errors;
193 unsigned long tx_window_errors;
1da177e4
LT
194 unsigned long rx_compressed;
195 unsigned long tx_compressed;
196};
197
be1f3c2c
BH
198#endif /* __KERNEL__ */
199
1da177e4
LT
200
201/* Media selection options. */
202enum {
203 IF_PORT_UNKNOWN = 0,
204 IF_PORT_10BASE2,
205 IF_PORT_10BASET,
206 IF_PORT_AUI,
207 IF_PORT_100BASET,
208 IF_PORT_100BASETX,
209 IF_PORT_100BASEFX
210};
211
212#ifdef __KERNEL__
213
214#include <linux/cache.h>
215#include <linux/skbuff.h>
216
adc9300e
ED
217#ifdef CONFIG_RPS
218#include <linux/jump_label.h>
219extern struct jump_label_key rps_needed;
220#endif
221
1da177e4
LT
222struct neighbour;
223struct neigh_parms;
224struct sk_buff;
225
f001fde5
JP
226struct netdev_hw_addr {
227 struct list_head list;
228 unsigned char addr[MAX_ADDR_LEN];
229 unsigned char type;
ccffad25
JP
230#define NETDEV_HW_ADDR_T_LAN 1
231#define NETDEV_HW_ADDR_T_SAN 2
232#define NETDEV_HW_ADDR_T_SLAVE 3
233#define NETDEV_HW_ADDR_T_UNICAST 4
22bedad3 234#define NETDEV_HW_ADDR_T_MULTICAST 5
ccffad25 235 bool synced;
22bedad3 236 bool global_use;
8f8f103d 237 int refcount;
f001fde5
JP
238 struct rcu_head rcu_head;
239};
240
31278e71
JP
241struct netdev_hw_addr_list {
242 struct list_head list;
243 int count;
244};
245
22bedad3
JP
246#define netdev_hw_addr_list_count(l) ((l)->count)
247#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
248#define netdev_hw_addr_list_for_each(ha, l) \
249 list_for_each_entry(ha, &(l)->list, list)
32e7bfc4 250
22bedad3
JP
251#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
252#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
253#define netdev_for_each_uc_addr(ha, dev) \
254 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
6683ece3 255
22bedad3
JP
256#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
257#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
18e225f2 258#define netdev_for_each_mc_addr(ha, dev) \
22bedad3 259 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
6683ece3 260
d94d9fee 261struct hh_cache {
f6b72b62 262 u16 hh_len;
5c25f686 263 u16 __pad;
3644f0ce 264 seqlock_t hh_lock;
1da177e4
LT
265
266 /* cached hardware header; allow for machine alignment needs. */
267#define HH_DATA_MOD 16
268#define HH_DATA_OFF(__len) \
5ba0eac6 269 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
1da177e4
LT
270#define HH_DATA_ALIGN(__len) \
271 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
272 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
273};
274
275/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
276 * Alternative is:
277 * dev->hard_header_len ? (dev->hard_header_len +
278 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
279 *
280 * We could use other alignment values, but we must maintain the
281 * relationship HH alignment <= LL alignment.
282 */
283#define LL_RESERVED_SPACE(dev) \
f5184d26 284 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
1da177e4 285#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
f5184d26 286 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
1da177e4 287
3b04ddde
SH
288struct header_ops {
289 int (*create) (struct sk_buff *skb, struct net_device *dev,
290 unsigned short type, const void *daddr,
291 const void *saddr, unsigned len);
292 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
293 int (*rebuild)(struct sk_buff *skb);
e69dd336 294 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
3b04ddde
SH
295 void (*cache_update)(struct hh_cache *hh,
296 const struct net_device *dev,
297 const unsigned char *haddr);
298};
299
1da177e4
LT
300/* These flag bits are private to the generic network queueing
301 * layer, they may not be explicitly referenced by any other
302 * code.
303 */
304
d94d9fee 305enum netdev_state_t {
1da177e4
LT
306 __LINK_STATE_START,
307 __LINK_STATE_PRESENT,
1da177e4 308 __LINK_STATE_NOCARRIER,
b00055aa
SR
309 __LINK_STATE_LINKWATCH_PENDING,
310 __LINK_STATE_DORMANT,
1da177e4
LT
311};
312
313
314/*
315 * This structure holds at boot time configured netdevice settings. They
fe2918b0 316 * are then used in the device probing.
1da177e4
LT
317 */
318struct netdev_boot_setup {
319 char name[IFNAMSIZ];
320 struct ifmap map;
321};
322#define NETDEV_BOOT_SETUP_MAX 8
323
20380731 324extern int __init netdev_boot_setup(char *str);
1da177e4 325
bea3348e
SH
326/*
327 * Structure for NAPI scheduling similar to tasklet but with weighting
328 */
329struct napi_struct {
330 /* The poll_list must only be managed by the entity which
331 * changes the state of the NAPI_STATE_SCHED bit. This means
332 * whoever atomically sets that bit can add this napi_struct
333 * to the per-cpu poll_list, and whoever clears that bit
334 * can remove from the list right before clearing the bit.
335 */
336 struct list_head poll_list;
337
338 unsigned long state;
339 int weight;
340 int (*poll)(struct napi_struct *, int);
341#ifdef CONFIG_NETPOLL
342 spinlock_t poll_lock;
343 int poll_owner;
bea3348e 344#endif
4ae5544f
HX
345
346 unsigned int gro_count;
347
5d38a079 348 struct net_device *dev;
d565b0a1
HX
349 struct list_head dev_list;
350 struct sk_buff *gro_list;
5d38a079 351 struct sk_buff *skb;
bea3348e
SH
352};
353
d94d9fee 354enum {
bea3348e 355 NAPI_STATE_SCHED, /* Poll is scheduled */
a0a46196 356 NAPI_STATE_DISABLE, /* Disable pending */
7b363e44 357 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
bea3348e
SH
358};
359
5b252f0c 360enum gro_result {
d1c76af9
HX
361 GRO_MERGED,
362 GRO_MERGED_FREE,
363 GRO_HELD,
364 GRO_NORMAL,
365 GRO_DROP,
366};
5b252f0c 367typedef enum gro_result gro_result_t;
d1c76af9 368
8a4eb573
JP
369/*
370 * enum rx_handler_result - Possible return values for rx_handlers.
371 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
372 * further.
373 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
374 * case skb->dev was changed by rx_handler.
375 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
376 * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
377 *
378 * rx_handlers are functions called from inside __netif_receive_skb(), to do
379 * special processing of the skb, prior to delivery to protocol handlers.
380 *
381 * Currently, a net_device can only have a single rx_handler registered. Trying
382 * to register a second rx_handler will return -EBUSY.
383 *
384 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
385 * To unregister a rx_handler on a net_device, use
386 * netdev_rx_handler_unregister().
387 *
388 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
389 * do with the skb.
390 *
391 * If the rx_handler consumed to skb in some way, it should return
392 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
393 * the skb to be delivered in some other ways.
394 *
395 * If the rx_handler changed skb->dev, to divert the skb to another
396 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
397 * new device will be called if it exists.
398 *
399 * If the rx_handler consider the skb should be ignored, it should return
400 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
401 * are registred on exact device (ptype->dev == skb->dev).
402 *
403 * If the rx_handler didn't changed skb->dev, but want the skb to be normally
404 * delivered, it should return RX_HANDLER_PASS.
405 *
406 * A device without a registered rx_handler will behave as if rx_handler
407 * returned RX_HANDLER_PASS.
408 */
409
410enum rx_handler_result {
411 RX_HANDLER_CONSUMED,
412 RX_HANDLER_ANOTHER,
413 RX_HANDLER_EXACT,
414 RX_HANDLER_PASS,
415};
416typedef enum rx_handler_result rx_handler_result_t;
417typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
ab95bfe0 418
b3c97528 419extern void __napi_schedule(struct napi_struct *n);
bea3348e 420
a0a46196
DM
421static inline int napi_disable_pending(struct napi_struct *n)
422{
423 return test_bit(NAPI_STATE_DISABLE, &n->state);
424}
425
bea3348e
SH
426/**
427 * napi_schedule_prep - check if napi can be scheduled
428 * @n: napi context
429 *
430 * Test if NAPI routine is already running, and if not mark
431 * it as running. This is used as a condition variable
a0a46196
DM
432 * insure only one NAPI poll instance runs. We also make
433 * sure there is no pending NAPI disable.
bea3348e
SH
434 */
435static inline int napi_schedule_prep(struct napi_struct *n)
436{
a0a46196
DM
437 return !napi_disable_pending(n) &&
438 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
bea3348e
SH
439}
440
441/**
442 * napi_schedule - schedule NAPI poll
443 * @n: napi context
444 *
445 * Schedule NAPI poll routine to be called if it is not already
446 * running.
447 */
448static inline void napi_schedule(struct napi_struct *n)
449{
450 if (napi_schedule_prep(n))
451 __napi_schedule(n);
452}
453
bfe13f54
RD
454/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
455static inline int napi_reschedule(struct napi_struct *napi)
456{
457 if (napi_schedule_prep(napi)) {
458 __napi_schedule(napi);
459 return 1;
460 }
461 return 0;
462}
463
bea3348e
SH
464/**
465 * napi_complete - NAPI processing complete
466 * @n: napi context
467 *
468 * Mark NAPI processing as complete.
469 */
d565b0a1
HX
470extern void __napi_complete(struct napi_struct *n);
471extern void napi_complete(struct napi_struct *n);
bea3348e
SH
472
473/**
474 * napi_disable - prevent NAPI from scheduling
475 * @n: napi context
476 *
477 * Stop NAPI from being scheduled on this context.
478 * Waits till any outstanding processing completes.
479 */
480static inline void napi_disable(struct napi_struct *n)
481{
a0a46196 482 set_bit(NAPI_STATE_DISABLE, &n->state);
bea3348e 483 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
43cc7380 484 msleep(1);
a0a46196 485 clear_bit(NAPI_STATE_DISABLE, &n->state);
bea3348e
SH
486}
487
488/**
489 * napi_enable - enable NAPI scheduling
490 * @n: napi context
491 *
492 * Resume NAPI from being scheduled on this context.
493 * Must be paired with napi_disable.
494 */
495static inline void napi_enable(struct napi_struct *n)
496{
497 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
498 smp_mb__before_clear_bit();
499 clear_bit(NAPI_STATE_SCHED, &n->state);
500}
501
c264c3de
SH
502#ifdef CONFIG_SMP
503/**
504 * napi_synchronize - wait until NAPI is not running
505 * @n: napi context
506 *
507 * Wait until NAPI is done being scheduled on this context.
508 * Waits till any outstanding processing completes but
509 * does not disable future activations.
510 */
511static inline void napi_synchronize(const struct napi_struct *n)
512{
513 while (test_bit(NAPI_STATE_SCHED, &n->state))
514 msleep(1);
515}
516#else
517# define napi_synchronize(n) barrier()
518#endif
519
d94d9fee 520enum netdev_queue_state_t {
73466498
TH
521 __QUEUE_STATE_DRV_XOFF,
522 __QUEUE_STATE_STACK_XOFF,
c3f26a26 523 __QUEUE_STATE_FROZEN,
73466498
TH
524#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
525 (1 << __QUEUE_STATE_STACK_XOFF))
526#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
527 (1 << __QUEUE_STATE_FROZEN))
79d16385 528};
73466498
TH
529/*
530 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
531 * netif_tx_* functions below are used to manipulate this flag. The
532 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
533 * queue independently. The netif_xmit_*stopped functions below are called
534 * to check if the queue has been stopped by the driver or stack (either
535 * of the XOFF bits are set in the state). Drivers should not need to call
536 * netif_xmit*stopped functions, they should only be using netif_tx_*.
537 */
79d16385 538
bb949fbd 539struct netdev_queue {
6a321cb3
ED
540/*
541 * read mostly part
542 */
bb949fbd 543 struct net_device *dev;
b0e1e646
DM
544 struct Qdisc *qdisc;
545 struct Qdisc *qdisc_sleeping;
ccf5ff69 546#ifdef CONFIG_SYSFS
1d24eb48
TH
547 struct kobject kobj;
548#endif
f2cd2d3e
ED
549#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
550 int numa_node;
551#endif
6a321cb3
ED
552/*
553 * write mostly part
554 */
555 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
556 int xmit_lock_owner;
9d21493b
ED
557 /*
558 * please use this field instead of dev->trans_start
559 */
560 unsigned long trans_start;
ccf5ff69 561
562 /*
563 * Number of TX timeouts for this queue
564 * (/sys/class/net/DEV/Q/trans_timeout)
565 */
566 unsigned long trans_timeout;
114cf580
TH
567
568 unsigned long state;
569
570#ifdef CONFIG_BQL
571 struct dql dql;
572#endif
e8a0464c 573} ____cacheline_aligned_in_smp;
bb949fbd 574
f2cd2d3e
ED
575static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
576{
577#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
578 return q->numa_node;
579#else
b236da69 580 return NUMA_NO_NODE;
f2cd2d3e
ED
581#endif
582}
583
584static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
585{
586#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
587 q->numa_node = node;
588#endif
589}
590
df334545 591#ifdef CONFIG_RPS
0a9627f2
TH
592/*
593 * This structure holds an RPS map which can be of variable length. The
594 * map is an array of CPUs.
595 */
596struct rps_map {
597 unsigned int len;
598 struct rcu_head rcu;
599 u16 cpus[0];
600};
60b778ce 601#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
0a9627f2 602
fec5e652 603/*
c445477d
BH
604 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
605 * tail pointer for that CPU's input queue at the time of last enqueue, and
606 * a hardware filter index.
fec5e652
TH
607 */
608struct rps_dev_flow {
609 u16 cpu;
c445477d 610 u16 filter;
fec5e652
TH
611 unsigned int last_qtail;
612};
c445477d 613#define RPS_NO_FILTER 0xffff
fec5e652
TH
614
615/*
616 * The rps_dev_flow_table structure contains a table of flow mappings.
617 */
618struct rps_dev_flow_table {
619 unsigned int mask;
620 struct rcu_head rcu;
621 struct work_struct free_work;
622 struct rps_dev_flow flows[0];
623};
624#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
60b778ce 625 ((_num) * sizeof(struct rps_dev_flow)))
fec5e652
TH
626
627/*
628 * The rps_sock_flow_table contains mappings of flows to the last CPU
629 * on which they were processed by the application (set in recvmsg).
630 */
631struct rps_sock_flow_table {
632 unsigned int mask;
633 u16 ents[0];
634};
635#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
60b778ce 636 ((_num) * sizeof(u16)))
fec5e652
TH
637
638#define RPS_NO_CPU 0xffff
639
640static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
641 u32 hash)
642{
643 if (table && hash) {
644 unsigned int cpu, index = hash & table->mask;
645
646 /* We only give a hint, preemption can change cpu under us */
647 cpu = raw_smp_processor_id();
648
649 if (table->ents[index] != cpu)
650 table->ents[index] = cpu;
651 }
652}
653
654static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
655 u32 hash)
656{
657 if (table && hash)
658 table->ents[hash & table->mask] = RPS_NO_CPU;
659}
660
6e3f7faf 661extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
fec5e652 662
c445477d
BH
663#ifdef CONFIG_RFS_ACCEL
664extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
665 u32 flow_id, u16 filter_id);
666#endif
667
0a9627f2
TH
668/* This structure contains an instance of an RX queue. */
669struct netdev_rx_queue {
6e3f7faf
ED
670 struct rps_map __rcu *rps_map;
671 struct rps_dev_flow_table __rcu *rps_flow_table;
672 struct kobject kobj;
fe822240 673 struct net_device *dev;
0a9627f2 674} ____cacheline_aligned_in_smp;
fec5e652 675#endif /* CONFIG_RPS */
d314774c 676
bf264145
TH
677#ifdef CONFIG_XPS
678/*
679 * This structure holds an XPS map which can be of variable length. The
680 * map is an array of queues.
681 */
682struct xps_map {
683 unsigned int len;
684 unsigned int alloc_len;
685 struct rcu_head rcu;
686 u16 queues[0];
687};
60b778ce 688#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
bf264145
TH
689#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
690 / sizeof(u16))
691
692/*
693 * This structure holds all XPS maps for device. Maps are indexed by CPU.
694 */
695struct xps_dev_maps {
696 struct rcu_head rcu;
a4177869 697 struct xps_map __rcu *cpu_map[0];
bf264145
TH
698};
699#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
700 (nr_cpu_ids * sizeof(struct xps_map *)))
701#endif /* CONFIG_XPS */
702
4f57c087
JF
703#define TC_MAX_QUEUE 16
704#define TC_BITMASK 15
705/* HW offloaded queuing disciplines txq count and offset maps */
706struct netdev_tc_txq {
707 u16 count;
708 u16 offset;
709};
710
68bad94e
NP
711#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
712/*
713 * This structure is to hold information about the device
714 * configured to run FCoE protocol stack.
715 */
716struct netdev_fcoe_hbainfo {
717 char manufacturer[64];
718 char serial_number[64];
719 char hardware_version[64];
720 char driver_version[64];
721 char optionrom_version[64];
722 char firmware_version[64];
723 char model[256];
724 char model_description[256];
725};
726#endif
727
d314774c
SH
728/*
729 * This structure defines the management hooks for network devices.
00829823
SH
730 * The following hooks can be defined; unless noted otherwise, they are
731 * optional and can be filled with a null pointer.
d314774c
SH
732 *
733 * int (*ndo_init)(struct net_device *dev);
734 * This function is called once when network device is registered.
735 * The network device can use this to any late stage initializaton
736 * or semantic validattion. It can fail with an error code which will
737 * be propogated back to register_netdev
738 *
739 * void (*ndo_uninit)(struct net_device *dev);
740 * This function is called when device is unregistered or when registration
741 * fails. It is not called if init fails.
742 *
743 * int (*ndo_open)(struct net_device *dev);
744 * This function is called when network device transistions to the up
745 * state.
746 *
747 * int (*ndo_stop)(struct net_device *dev);
748 * This function is called when network device transistions to the down
749 * state.
750 *
dc1f8bf6
SH
751 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
752 * struct net_device *dev);
00829823 753 * Called when a packet needs to be transmitted.
dc1f8bf6
SH
754 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
755 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
00829823
SH
756 * Required can not be NULL.
757 *
758 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
759 * Called to decide which queue to when device supports multiple
760 * transmit queues.
761 *
d314774c
SH
762 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
763 * This function is called to allow device receiver to make
764 * changes to configuration when multicast or promiscious is enabled.
765 *
766 * void (*ndo_set_rx_mode)(struct net_device *dev);
767 * This function is called device changes address list filtering.
01789349
JP
768 * If driver handles unicast address filtering, it should set
769 * IFF_UNICAST_FLT to its priv_flags.
d314774c
SH
770 *
771 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
772 * This function is called when the Media Access Control address
37b607c5 773 * needs to be changed. If this interface is not defined, the
d314774c
SH
774 * mac address can not be changed.
775 *
776 * int (*ndo_validate_addr)(struct net_device *dev);
777 * Test if Media Access Control address is valid for the device.
778 *
779 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
780 * Called when a user request an ioctl which can't be handled by
781 * the generic interface code. If not defined ioctl's return
782 * not supported error code.
783 *
784 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
785 * Used to set network devices bus interface parameters. This interface
786 * is retained for legacy reason, new devices should use the bus
787 * interface (PCI) for low level management.
788 *
789 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
790 * Called when a user wants to change the Maximum Transfer Unit
791 * of a device. If not defined, any request to change MTU will
792 * will return an error.
793 *
00829823 794 * void (*ndo_tx_timeout)(struct net_device *dev);
d314774c
SH
795 * Callback uses when the transmitter has not made any progress
796 * for dev->watchdog ticks.
797 *
3cfde79c 798 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
28172739 799 * struct rtnl_link_stats64 *storage);
d308e38f 800 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
d314774c 801 * Called when a user wants to get the network device usage
be1f3c2c 802 * statistics. Drivers must do one of the following:
3cfde79c
BH
803 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
804 * rtnl_link_stats64 structure passed by the caller.
82695d9b 805 * 2. Define @ndo_get_stats to update a net_device_stats structure
be1f3c2c
BH
806 * (which should normally be dev->stats) and return a pointer to
807 * it. The structure may be changed asynchronously only if each
808 * field is written atomically.
809 * 3. Update dev->stats asynchronously and atomically, and define
810 * neither operation.
d314774c 811 *
8e586137 812 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
d314774c
SH
813 * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
814 * this function is called when a VLAN id is registered.
815 *
8e586137 816 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
d314774c
SH
817 * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
818 * this function is called when a VLAN id is unregistered.
819 *
820 * void (*ndo_poll_controller)(struct net_device *dev);
95c26df8
WM
821 *
822 * SR-IOV management functions.
823 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
824 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
825 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
5f8444a3 826 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
95c26df8
WM
827 * int (*ndo_get_vf_config)(struct net_device *dev,
828 * int vf, struct ifla_vf_info *ivf);
57b61080
SF
829 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
830 * struct nlattr *port[]);
831 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
4f57c087
JF
832 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
833 * Called to setup 'tc' number of traffic classes in the net device. This
834 * is always called from the stack with the rtnl lock held and netif tx
835 * queues stopped. This allows the netdevice to perform queue management
836 * safely.
c445477d 837 *
e9bce845
YZ
838 * Fiber Channel over Ethernet (FCoE) offload functions.
839 * int (*ndo_fcoe_enable)(struct net_device *dev);
840 * Called when the FCoE protocol stack wants to start using LLD for FCoE
841 * so the underlying device can perform whatever needed configuration or
842 * initialization to support acceleration of FCoE traffic.
843 *
844 * int (*ndo_fcoe_disable)(struct net_device *dev);
845 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
846 * so the underlying device can perform whatever needed clean-ups to
847 * stop supporting acceleration of FCoE traffic.
848 *
849 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
850 * struct scatterlist *sgl, unsigned int sgc);
851 * Called when the FCoE Initiator wants to initialize an I/O that
852 * is a possible candidate for Direct Data Placement (DDP). The LLD can
853 * perform necessary setup and returns 1 to indicate the device is set up
854 * successfully to perform DDP on this I/O, otherwise this returns 0.
855 *
856 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
857 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
858 * indicated by the FC exchange id 'xid', so the underlying device can
859 * clean up and reuse resources for later DDP requests.
860 *
861 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
862 * struct scatterlist *sgl, unsigned int sgc);
863 * Called when the FCoE Target wants to initialize an I/O that
864 * is a possible candidate for Direct Data Placement (DDP). The LLD can
865 * perform necessary setup and returns 1 to indicate the device is set up
866 * successfully to perform DDP on this I/O, otherwise this returns 0.
867 *
68bad94e
NP
868 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
869 * struct netdev_fcoe_hbainfo *hbainfo);
870 * Called when the FCoE Protocol stack wants information on the underlying
871 * device. This information is utilized by the FCoE protocol stack to
872 * register attributes with Fiber Channel management service as per the
873 * FC-GS Fabric Device Management Information(FDMI) specification.
874 *
e9bce845
YZ
875 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
876 * Called when the underlying device wants to override default World Wide
877 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
878 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
879 * protocol stack to use.
880 *
c445477d
BH
881 * RFS acceleration.
882 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
883 * u16 rxq_index, u32 flow_id);
884 * Set hardware filter for RFS. rxq_index is the target queue index;
885 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
886 * Return the filter ID on success, or a negative error code.
fbaec0ea
JP
887 *
888 * Slave management functions (for bridge, bonding, etc). User should
889 * call netdev_set_master() to set dev->master properly.
890 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
891 * Called to make another netdev an underling.
892 *
893 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
894 * Called to release previously enslaved netdev.
5455c699
MM
895 *
896 * Feature/offload setting functions.
c8f44aff
MM
897 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
898 * netdev_features_t features);
5455c699
MM
899 * Adjusts the requested feature flags according to device-specific
900 * constraints, and returns the resulting flags. Must not modify
901 * the device state.
902 *
c8f44aff 903 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
5455c699
MM
904 * Called to update device configuration to new features. Passed
905 * feature set might be less than what was returned by ndo_fix_features()).
906 * Must return >0 or -errno if it changed dev->features itself.
907 *
d314774c
SH
908 */
909struct net_device_ops {
910 int (*ndo_init)(struct net_device *dev);
911 void (*ndo_uninit)(struct net_device *dev);
912 int (*ndo_open)(struct net_device *dev);
913 int (*ndo_stop)(struct net_device *dev);
dc1f8bf6 914 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
00829823
SH
915 struct net_device *dev);
916 u16 (*ndo_select_queue)(struct net_device *dev,
917 struct sk_buff *skb);
d314774c
SH
918 void (*ndo_change_rx_flags)(struct net_device *dev,
919 int flags);
d314774c 920 void (*ndo_set_rx_mode)(struct net_device *dev);
d314774c
SH
921 int (*ndo_set_mac_address)(struct net_device *dev,
922 void *addr);
d314774c 923 int (*ndo_validate_addr)(struct net_device *dev);
d314774c
SH
924 int (*ndo_do_ioctl)(struct net_device *dev,
925 struct ifreq *ifr, int cmd);
d314774c
SH
926 int (*ndo_set_config)(struct net_device *dev,
927 struct ifmap *map);
00829823
SH
928 int (*ndo_change_mtu)(struct net_device *dev,
929 int new_mtu);
930 int (*ndo_neigh_setup)(struct net_device *dev,
931 struct neigh_parms *);
d314774c
SH
932 void (*ndo_tx_timeout) (struct net_device *dev);
933
28172739
ED
934 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
935 struct rtnl_link_stats64 *storage);
d314774c
SH
936 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
937
8e586137 938 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
d314774c 939 unsigned short vid);
8e586137 940 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
d314774c
SH
941 unsigned short vid);
942#ifdef CONFIG_NET_POLL_CONTROLLER
d314774c 943 void (*ndo_poll_controller)(struct net_device *dev);
4247e161
HX
944 int (*ndo_netpoll_setup)(struct net_device *dev,
945 struct netpoll_info *info);
0e34e931 946 void (*ndo_netpoll_cleanup)(struct net_device *dev);
d314774c 947#endif
95c26df8
WM
948 int (*ndo_set_vf_mac)(struct net_device *dev,
949 int queue, u8 *mac);
950 int (*ndo_set_vf_vlan)(struct net_device *dev,
951 int queue, u16 vlan, u8 qos);
952 int (*ndo_set_vf_tx_rate)(struct net_device *dev,
953 int vf, int rate);
5f8444a3
GR
954 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
955 int vf, bool setting);
95c26df8
WM
956 int (*ndo_get_vf_config)(struct net_device *dev,
957 int vf,
958 struct ifla_vf_info *ivf);
57b61080
SF
959 int (*ndo_set_vf_port)(struct net_device *dev,
960 int vf,
961 struct nlattr *port[]);
962 int (*ndo_get_vf_port)(struct net_device *dev,
963 int vf, struct sk_buff *skb);
4f57c087 964 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
d11ead75 965#if IS_ENABLED(CONFIG_FCOE)
cb454399
YZ
966 int (*ndo_fcoe_enable)(struct net_device *dev);
967 int (*ndo_fcoe_disable)(struct net_device *dev);
4d288d57
YZ
968 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
969 u16 xid,
970 struct scatterlist *sgl,
971 unsigned int sgc);
972 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
973 u16 xid);
6247e086
YZ
974 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
975 u16 xid,
976 struct scatterlist *sgl,
977 unsigned int sgc);
68bad94e
NP
978 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
979 struct netdev_fcoe_hbainfo *hbainfo);
3c9c36bc
BPG
980#endif
981
d11ead75 982#if IS_ENABLED(CONFIG_LIBFCOE)
df5c7945
YZ
983#define NETDEV_FCOE_WWNN 0
984#define NETDEV_FCOE_WWPN 1
985 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
986 u64 *wwn, int type);
4d288d57 987#endif
3c9c36bc 988
c445477d
BH
989#ifdef CONFIG_RFS_ACCEL
990 int (*ndo_rx_flow_steer)(struct net_device *dev,
991 const struct sk_buff *skb,
992 u16 rxq_index,
993 u32 flow_id);
994#endif
fbaec0ea
JP
995 int (*ndo_add_slave)(struct net_device *dev,
996 struct net_device *slave_dev);
997 int (*ndo_del_slave)(struct net_device *dev,
998 struct net_device *slave_dev);
c8f44aff
MM
999 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1000 netdev_features_t features);
5455c699 1001 int (*ndo_set_features)(struct net_device *dev,
c8f44aff 1002 netdev_features_t features);
da6a8fa0 1003 int (*ndo_neigh_construct)(struct neighbour *n);
447f2191 1004 void (*ndo_neigh_destroy)(struct neighbour *n);
d314774c
SH
1005};
1006
1da177e4
LT
1007/*
1008 * The DEVICE structure.
1009 * Actually, this whole structure is a big mistake. It mixes I/O
1010 * data with strictly "high-level" data, and it has to know about
1011 * almost every data structure used in the INET module.
1012 *
1013 * FIXME: cleanup struct net_device such that network protocol info
1014 * moves out.
1015 */
1016
d94d9fee 1017struct net_device {
1da177e4
LT
1018
1019 /*
1020 * This is the first field of the "visible" part of this structure
1021 * (i.e. as seen by users in the "Space.c" file). It is the name
724df615 1022 * of the interface.
1da177e4
LT
1023 */
1024 char name[IFNAMSIZ];
ed77134b 1025
cc749986 1026 struct pm_qos_request pm_qos_req;
ed77134b 1027
9356b8fc
ED
1028 /* device name hash chain */
1029 struct hlist_node name_hlist;
0b815a1a
SH
1030 /* snmp alias */
1031 char *ifalias;
1da177e4
LT
1032
1033 /*
1034 * I/O specific fields
1035 * FIXME: Merge these and struct ifmap into one
1036 */
1037 unsigned long mem_end; /* shared mem end */
1038 unsigned long mem_start; /* shared mem start */
1039 unsigned long base_addr; /* device I/O address */
1040 unsigned int irq; /* device IRQ number */
1041
1042 /*
1043 * Some hardware also needs these fields, but they are not
1044 * part of the usual set specified in Space.c.
1045 */
1046
1da177e4
LT
1047 unsigned long state;
1048
7562f876 1049 struct list_head dev_list;
bea3348e 1050 struct list_head napi_list;
44a0873d 1051 struct list_head unreg_list;
1da177e4 1052
5455c699 1053 /* currently active device features */
c8f44aff 1054 netdev_features_t features;
5455c699 1055 /* user-changeable features */
c8f44aff 1056 netdev_features_t hw_features;
5455c699 1057 /* user-requested features */
c8f44aff 1058 netdev_features_t wanted_features;
1aac6267 1059 /* mask of features inheritable by VLAN devices */
c8f44aff 1060 netdev_features_t vlan_features;
04ed3e74 1061
1da177e4
LT
1062 /* Interface index. Unique device identifier */
1063 int ifindex;
1064 int iflink;
1065
c45d286e 1066 struct net_device_stats stats;
caf586e5
ED
1067 atomic_long_t rx_dropped; /* dropped packets by core network
1068 * Do not use this in drivers.
1069 */
1da177e4 1070
b86e0280 1071#ifdef CONFIG_WIRELESS_EXT
1da177e4
LT
1072 /* List of functions to handle Wireless Extensions (instead of ioctl).
1073 * See <net/iw_handler.h> for details. Jean II */
1074 const struct iw_handler_def * wireless_handlers;
1075 /* Instance data managed by the core of Wireless Extensions. */
1076 struct iw_public_data * wireless_data;
b86e0280 1077#endif
d314774c
SH
1078 /* Management operations */
1079 const struct net_device_ops *netdev_ops;
76fd8593 1080 const struct ethtool_ops *ethtool_ops;
1da177e4 1081
3b04ddde
SH
1082 /* Hardware header description */
1083 const struct header_ops *header_ops;
1084
b00055aa 1085 unsigned int flags; /* interface flags (a la BSD) */
bdc220da 1086 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
1da177e4 1087 unsigned short gflags;
1da177e4
LT
1088 unsigned short padded; /* How much padding added by alloc_netdev() */
1089
b00055aa
SR
1090 unsigned char operstate; /* RFC2863 operstate */
1091 unsigned char link_mode; /* mapping policy to operstate */
1092
bdc220da
JP
1093 unsigned char if_port; /* Selectable AUI, TP,..*/
1094 unsigned char dma; /* DMA channel */
1095
cd7b5396 1096 unsigned int mtu; /* interface MTU value */
1da177e4
LT
1097 unsigned short type; /* interface hardware type */
1098 unsigned short hard_header_len; /* hardware hdr length */
1da177e4 1099
f5184d26
JB
1100 /* extra head- and tailroom the hardware may need, but not in all cases
1101 * can this be guaranteed, especially tailroom. Some cases also use
1102 * LL_MAX_HEADER instead to allocate the skb.
1103 */
1104 unsigned short needed_headroom;
1105 unsigned short needed_tailroom;
1106
1da177e4 1107 /* Interface address info. */
a6f9a705 1108 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
c1f79426 1109 unsigned char addr_assign_type; /* hw address assignment type */
1da177e4 1110 unsigned char addr_len; /* hardware address length */
596b9b68 1111 unsigned char neigh_priv_len;
1da177e4
LT
1112 unsigned short dev_id; /* for shared network cards */
1113
ccffad25 1114 spinlock_t addr_list_lock;
22bedad3
JP
1115 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1116 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
2d348d1f 1117 bool uc_promisc;
9d45abe1
WC
1118 unsigned int promiscuity;
1119 unsigned int allmulti;
1da177e4 1120
1da177e4
LT
1121
1122 /* Protocol specific pointers */
65ac6a5f 1123
d11ead75 1124#if IS_ENABLED(CONFIG_VLAN_8021Q)
5b9ea6e0 1125 struct vlan_info __rcu *vlan_info; /* VLAN info */
65ac6a5f 1126#endif
34a430d7 1127#if IS_ENABLED(CONFIG_NET_DSA)
cf50dcc2 1128 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
91da11f8 1129#endif
1da177e4 1130 void *atalk_ptr; /* AppleTalk link */
95ae6b22 1131 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
fc766e4c 1132 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
198caeca 1133 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
1da177e4
LT
1134 void *ec_ptr; /* Econet specific data */
1135 void *ax25_ptr; /* AX.25 specific data */
704232c2
JB
1136 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1137 assign before registering */
1da177e4 1138
9356b8fc 1139/*
cd13539b 1140 * Cache lines mostly used on receive path (including eth_type_trans())
9356b8fc 1141 */
4dc89133
ED
1142 unsigned long last_rx; /* Time of last Rx
1143 * This should not be set in
1144 * drivers, unless really needed,
1145 * because network stack (bonding)
1146 * use it if/when necessary, to
1147 * avoid dirtying this cache line.
1148 */
1149
cd13539b
ED
1150 struct net_device *master; /* Pointer to master device of a group,
1151 * which this device is member of.
1152 */
1153
9356b8fc 1154 /* Interface address info used in eth_type_trans() */
f001fde5
JP
1155 unsigned char *dev_addr; /* hw address, (before bcast
1156 because most packets are
1157 unicast) */
1158
31278e71
JP
1159 struct netdev_hw_addr_list dev_addrs; /* list of device
1160 hw addresses */
9356b8fc
ED
1161
1162 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
1da177e4 1163
ccf5ff69 1164#ifdef CONFIG_SYSFS
0a9627f2 1165 struct kset *queues_kset;
ccf5ff69 1166#endif
0a9627f2 1167
ccf5ff69 1168#ifdef CONFIG_RPS
0a9627f2
TH
1169 struct netdev_rx_queue *_rx;
1170
62fe0b40 1171 /* Number of RX queues allocated at register_netdev() time */
0a9627f2 1172 unsigned int num_rx_queues;
62fe0b40
BH
1173
1174 /* Number of RX queues currently active in device */
1175 unsigned int real_num_rx_queues;
c445477d
BH
1176
1177#ifdef CONFIG_RFS_ACCEL
1178 /* CPU reverse-mapping for RX completion interrupts, indexed
1179 * by RX queue number. Assigned by driver. This must only be
1180 * set if the ndo_rx_flow_steer operation is defined. */
1181 struct cpu_rmap *rx_cpu_rmap;
1182#endif
df334545 1183#endif
0a9627f2 1184
61391cde 1185 rx_handler_func_t __rcu *rx_handler;
1186 void __rcu *rx_handler_data;
e8a0464c 1187
24824a09 1188 struct netdev_queue __rcu *ingress_queue;
cd13539b
ED
1189
1190/*
1191 * Cache lines mostly used on transmit path
1192 */
e8a0464c 1193 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
fd2ea0a7
DM
1194
1195 /* Number of TX queues allocated at alloc_netdev_mq() time */
e8a0464c 1196 unsigned int num_tx_queues;
fd2ea0a7
DM
1197
1198 /* Number of TX queues currently active in device */
1199 unsigned int real_num_tx_queues;
1200
af356afa
PM
1201 /* root qdisc from userspace point of view */
1202 struct Qdisc *qdisc;
1203
1da177e4 1204 unsigned long tx_queue_len; /* Max frames per queue allowed */
c3f26a26 1205 spinlock_t tx_global_lock;
cd13539b 1206
bf264145 1207#ifdef CONFIG_XPS
a4177869 1208 struct xps_dev_maps __rcu *xps_maps;
bf264145 1209#endif
1d24eb48 1210
9356b8fc 1211 /* These may be needed for future network-power-down code. */
9d21493b
ED
1212
1213 /*
1214 * trans_start here is expensive for high speed devices on SMP,
1215 * please use netdev_queue->trans_start instead.
1216 */
9356b8fc
ED
1217 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1218
1219 int watchdog_timeo; /* used by dev_watchdog() */
1220 struct timer_list watchdog_timer;
1221
1da177e4 1222 /* Number of references to this device */
29b4433d 1223 int __percpu *pcpu_refcnt;
9356b8fc 1224
1da177e4
LT
1225 /* delayed register/unregister */
1226 struct list_head todo_list;
1da177e4
LT
1227 /* device index hash chain */
1228 struct hlist_node index_hlist;
1229
e014debe 1230 struct list_head link_watch_list;
572a103d 1231
1da177e4
LT
1232 /* register/unregister state machine */
1233 enum { NETREG_UNINITIALIZED=0,
b17a7c17 1234 NETREG_REGISTERED, /* completed register_netdevice */
1da177e4
LT
1235 NETREG_UNREGISTERING, /* called unregister_netdevice */
1236 NETREG_UNREGISTERED, /* completed unregister todo */
1237 NETREG_RELEASED, /* called free_netdev */
937f1ba5 1238 NETREG_DUMMY, /* dummy device for NAPI poll */
449f4544
ED
1239 } reg_state:8;
1240
1241 bool dismantle; /* device is going do be freed */
a2835763
PM
1242
1243 enum {
1244 RTNL_LINK_INITIALIZED,
1245 RTNL_LINK_INITIALIZING,
1246 } rtnl_link_state:16;
1da177e4 1247
d314774c
SH
1248 /* Called from unregister, can be used to call free_netdev */
1249 void (*destructor)(struct net_device *dev);
1da177e4 1250
1da177e4 1251#ifdef CONFIG_NETPOLL
115c1d6e 1252 struct netpoll_info *npinfo;
1da177e4 1253#endif
eae792b7 1254
c346dca1 1255#ifdef CONFIG_NET_NS
4a1c5371
EB
1256 /* Network namespace this network device is inside */
1257 struct net *nd_net;
c346dca1 1258#endif
4a1c5371 1259
4951704b 1260 /* mid-layer private */
a7855c78
ED
1261 union {
1262 void *ml_priv;
1263 struct pcpu_lstats __percpu *lstats; /* loopback stats */
290b895e 1264 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
6d81f41c 1265 struct pcpu_dstats __percpu *dstats; /* dummy stats */
a7855c78 1266 };
eca9ebac 1267 /* GARP */
3cc77ec7 1268 struct garp_port __rcu *garp_port;
1da177e4 1269
1da177e4 1270 /* class/net/name entry */
43cb76d9 1271 struct device dev;
0c509a6c
EB
1272 /* space for optional device, statistics, and wireless sysfs groups */
1273 const struct attribute_group *sysfs_groups[4];
38f7b870
PM
1274
1275 /* rtnetlink link ops */
1276 const struct rtnl_link_ops *rtnl_link_ops;
f25f4e44 1277
82cc1a7a
PWJ
1278 /* for setting kernel sock attribute on TCP connection setup */
1279#define GSO_MAX_SIZE 65536
1280 unsigned int gso_max_size;
d314774c 1281
7a6b6f51 1282#ifdef CONFIG_DCB
2f90b865 1283 /* Data Center Bridging netlink ops */
32953543 1284 const struct dcbnl_rtnl_ops *dcbnl_ops;
2f90b865 1285#endif
4f57c087
JF
1286 u8 num_tc;
1287 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1288 u8 prio_tc_map[TC_BITMASK + 1];
2f90b865 1289
d11ead75 1290#if IS_ENABLED(CONFIG_FCOE)
4d288d57
YZ
1291 /* max exchange id for FCoE LRO by ddp */
1292 unsigned int fcoe_ddp_xid;
5bc1421e
NH
1293#endif
1294#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1295 struct netprio_map __rcu *priomap;
4d288d57 1296#endif
c1f19b51
RC
1297 /* phy device may attach itself for hardware timestamping */
1298 struct phy_device *phydev;
cbda10fa
VD
1299
1300 /* group the device belongs to */
1301 int group;
1da177e4 1302};
43cb76d9 1303#define to_net_dev(d) container_of(d, struct net_device, dev)
1da177e4
LT
1304
1305#define NETDEV_ALIGN 32
1da177e4 1306
4f57c087
JF
1307static inline
1308int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1309{
1310 return dev->prio_tc_map[prio & TC_BITMASK];
1311}
1312
1313static inline
1314int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1315{
1316 if (tc >= dev->num_tc)
1317 return -EINVAL;
1318
1319 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1320 return 0;
1321}
1322
1323static inline
1324void netdev_reset_tc(struct net_device *dev)
1325{
1326 dev->num_tc = 0;
1327 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1328 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1329}
1330
1331static inline
1332int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1333{
1334 if (tc >= dev->num_tc)
1335 return -EINVAL;
1336
1337 dev->tc_to_txq[tc].count = count;
1338 dev->tc_to_txq[tc].offset = offset;
1339 return 0;
1340}
1341
1342static inline
1343int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1344{
1345 if (num_tc > TC_MAX_QUEUE)
1346 return -EINVAL;
1347
1348 dev->num_tc = num_tc;
1349 return 0;
1350}
1351
1352static inline
1353int netdev_get_num_tc(struct net_device *dev)
1354{
1355 return dev->num_tc;
1356}
1357
e8a0464c
DM
1358static inline
1359struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1360 unsigned int index)
1361{
1362 return &dev->_tx[index];
1363}
1364
1365static inline void netdev_for_each_tx_queue(struct net_device *dev,
1366 void (*f)(struct net_device *,
1367 struct netdev_queue *,
1368 void *),
1369 void *arg)
1370{
1371 unsigned int i;
1372
1373 for (i = 0; i < dev->num_tx_queues; i++)
1374 f(dev, &dev->_tx[i], arg);
1375}
1376
c346dca1
YH
1377/*
1378 * Net namespace inlines
1379 */
1380static inline
1381struct net *dev_net(const struct net_device *dev)
1382{
c2d9ba9b 1383 return read_pnet(&dev->nd_net);
c346dca1
YH
1384}
1385
1386static inline
f5aa23fd 1387void dev_net_set(struct net_device *dev, struct net *net)
c346dca1
YH
1388{
1389#ifdef CONFIG_NET_NS
f3005d7f
DL
1390 release_net(dev->nd_net);
1391 dev->nd_net = hold_net(net);
c346dca1
YH
1392#endif
1393}
1394
cf85d08f
LB
1395static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1396{
1397#ifdef CONFIG_NET_DSA_TAG_DSA
1398 if (dev->dsa_ptr != NULL)
1399 return dsa_uses_dsa_tags(dev->dsa_ptr);
1400#endif
1401
1402 return 0;
1403}
1404
8a83a00b
AB
1405#ifndef CONFIG_NET_NS
1406static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1407{
1408 skb->dev = dev;
1409}
1410#else /* CONFIG_NET_NS */
1411void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
1412#endif
1413
396138f0
LB
1414static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1415{
1416#ifdef CONFIG_NET_DSA_TAG_TRAILER
1417 if (dev->dsa_ptr != NULL)
1418 return dsa_uses_trailer_tags(dev->dsa_ptr);
1419#endif
1420
1421 return 0;
1422}
1423
bea3348e
SH
1424/**
1425 * netdev_priv - access network device private data
1426 * @dev: network device
1427 *
1428 * Get network device private data
1429 */
6472ce60 1430static inline void *netdev_priv(const struct net_device *dev)
1da177e4 1431{
1ce8e7b5 1432 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1da177e4
LT
1433}
1434
1da177e4
LT
1435/* Set the sysfs physical device reference for the network logical device
1436 * if set prior to registration will cause a symlink during initialization.
1437 */
43cb76d9 1438#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1da177e4 1439
384912ed
MH
1440/* Set the sysfs device type for the network logical device to allow
1441 * fin grained indentification of different network device types. For
1442 * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
1443 */
1444#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1445
3b582cc1
SH
1446/**
1447 * netif_napi_add - initialize a napi context
1448 * @dev: network device
1449 * @napi: napi context
1450 * @poll: polling function
1451 * @weight: default weight
1452 *
1453 * netif_napi_add() must be used to initialize a napi context prior to calling
1454 * *any* of the other napi related functions.
1455 */
d565b0a1
HX
1456void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1457 int (*poll)(struct napi_struct *, int), int weight);
bea3348e 1458
d8156534
AD
1459/**
1460 * netif_napi_del - remove a napi context
1461 * @napi: napi context
1462 *
1463 * netif_napi_del() removes a napi context from the network device napi list
1464 */
d565b0a1
HX
1465void netif_napi_del(struct napi_struct *napi);
1466
1467struct napi_gro_cb {
78a478d0
HX
1468 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1469 void *frag0;
1470
7489594c
HX
1471 /* Length of frag0. */
1472 unsigned int frag0_len;
1473
86911732
HX
1474 /* This indicates where we are processing relative to skb->data. */
1475 int data_offset;
1476
d565b0a1
HX
1477 /* This is non-zero if the packet may be of the same flow. */
1478 int same_flow;
1479
1480 /* This is non-zero if the packet cannot be merged with the new skb. */
1481 int flush;
1482
1483 /* Number of segments aggregated. */
1484 int count;
5d38a079
HX
1485
1486 /* Free the skb? */
1487 int free;
d565b0a1
HX
1488};
1489
1490#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
d8156534 1491
1da177e4 1492struct packet_type {
f2ccd8fa
DM
1493 __be16 type; /* This is really htons(ether_type). */
1494 struct net_device *dev; /* NULL is wildcarded here */
1495 int (*func) (struct sk_buff *,
1496 struct net_device *,
1497 struct packet_type *,
1498 struct net_device *);
576a30eb 1499 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
c8f44aff 1500 netdev_features_t features);
a430a43d 1501 int (*gso_send_check)(struct sk_buff *skb);
d565b0a1
HX
1502 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1503 struct sk_buff *skb);
1504 int (*gro_complete)(struct sk_buff *skb);
1da177e4
LT
1505 void *af_packet_priv;
1506 struct list_head list;
1507};
1508
1da177e4
LT
1509#include <linux/notifier.h>
1510
dcfe1421
AW
1511/* netdevice notifier chain. Please remember to update the rtnetlink
1512 * notification exclusion list in rtnetlink_event() when adding new
1513 * types.
1514 */
1515#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1516#define NETDEV_DOWN 0x0002
1517#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1518 detected a hardware crash and restarted
1519 - we can use this eg to kick tcp sessions
1520 once done */
1521#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1522#define NETDEV_REGISTER 0x0005
1523#define NETDEV_UNREGISTER 0x0006
1524#define NETDEV_CHANGEMTU 0x0007
1525#define NETDEV_CHANGEADDR 0x0008
1526#define NETDEV_GOING_DOWN 0x0009
1527#define NETDEV_CHANGENAME 0x000A
1528#define NETDEV_FEAT_CHANGE 0x000B
1529#define NETDEV_BONDING_FAILOVER 0x000C
1530#define NETDEV_PRE_UP 0x000D
1531#define NETDEV_PRE_TYPE_CHANGE 0x000E
1532#define NETDEV_POST_TYPE_CHANGE 0x000F
1533#define NETDEV_POST_INIT 0x0010
1534#define NETDEV_UNREGISTER_BATCH 0x0011
1535#define NETDEV_RELEASE 0x0012
1536#define NETDEV_NOTIFY_PEERS 0x0013
1537#define NETDEV_JOIN 0x0014
1538
1539extern int register_netdevice_notifier(struct notifier_block *nb);
1540extern int unregister_netdevice_notifier(struct notifier_block *nb);
1541extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1542
1543
1da177e4
LT
1544extern rwlock_t dev_base_lock; /* Device list lock */
1545
7562f876 1546
881d966b
EB
1547#define for_each_netdev(net, d) \
1548 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
dcbccbd4
EB
1549#define for_each_netdev_reverse(net, d) \
1550 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
c6d14c84
ED
1551#define for_each_netdev_rcu(net, d) \
1552 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
881d966b
EB
1553#define for_each_netdev_safe(net, d, n) \
1554 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1555#define for_each_netdev_continue(net, d) \
1556 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
254245d2 1557#define for_each_netdev_continue_rcu(net, d) \
1558 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
881d966b 1559#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
7562f876 1560
a050c33f
DL
1561static inline struct net_device *next_net_device(struct net_device *dev)
1562{
1563 struct list_head *lh;
1564 struct net *net;
1565
c346dca1 1566 net = dev_net(dev);
a050c33f
DL
1567 lh = dev->dev_list.next;
1568 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1569}
1570
ce81b76a
ED
1571static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1572{
1573 struct list_head *lh;
1574 struct net *net;
1575
1576 net = dev_net(dev);
ccf43438 1577 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
ce81b76a
ED
1578 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1579}
1580
a050c33f
DL
1581static inline struct net_device *first_net_device(struct net *net)
1582{
1583 return list_empty(&net->dev_base_head) ? NULL :
1584 net_device_entry(net->dev_base_head.next);
1585}
7562f876 1586
ccf43438
ED
1587static inline struct net_device *first_net_device_rcu(struct net *net)
1588{
1589 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1590
1591 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1592}
1593
1da177e4
LT
1594extern int netdev_boot_setup_check(struct net_device *dev);
1595extern unsigned long netdev_boot_base(const char *prefix, int unit);
941666c2
ED
1596extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1597 const char *hwaddr);
881d966b
EB
1598extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1599extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1da177e4
LT
1600extern void dev_add_pack(struct packet_type *pt);
1601extern void dev_remove_pack(struct packet_type *pt);
1602extern void __dev_remove_pack(struct packet_type *pt);
1603
bb69ae04
ED
1604extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1605 unsigned short mask);
881d966b 1606extern struct net_device *dev_get_by_name(struct net *net, const char *name);
72c9528b 1607extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
881d966b 1608extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1da177e4
LT
1609extern int dev_alloc_name(struct net_device *dev, const char *name);
1610extern int dev_open(struct net_device *dev);
1611extern int dev_close(struct net_device *dev);
0187bdfb 1612extern void dev_disable_lro(struct net_device *dev);
1da177e4
LT
1613extern int dev_queue_xmit(struct sk_buff *skb);
1614extern int register_netdevice(struct net_device *dev);
44a0873d
ED
1615extern void unregister_netdevice_queue(struct net_device *dev,
1616 struct list_head *head);
9b5e383c 1617extern void unregister_netdevice_many(struct list_head *head);
44a0873d
ED
1618static inline void unregister_netdevice(struct net_device *dev)
1619{
1620 unregister_netdevice_queue(dev, NULL);
1621}
1622
29b4433d 1623extern int netdev_refcnt_read(const struct net_device *dev);
1da177e4
LT
1624extern void free_netdev(struct net_device *dev);
1625extern void synchronize_net(void);
937f1ba5 1626extern int init_dummy_netdev(struct net_device *dev);
9d40bbda 1627extern void netdev_resync_ops(struct net_device *dev);
937f1ba5 1628
881d966b
EB
1629extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1630extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
fb699dfd 1631extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1da177e4
LT
1632extern int dev_restart(struct net_device *dev);
1633#ifdef CONFIG_NETPOLL_TRAP
1634extern int netpoll_trap(void);
1635#endif
86911732
HX
1636extern int skb_gro_receive(struct sk_buff **head,
1637 struct sk_buff *skb);
78a478d0 1638extern void skb_gro_reset_offset(struct sk_buff *skb);
86911732
HX
1639
1640static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1641{
1642 return NAPI_GRO_CB(skb)->data_offset;
1643}
1644
1645static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1646{
1647 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1648}
1649
1650static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1651{
1652 NAPI_GRO_CB(skb)->data_offset += len;
1653}
1654
a5b1cf28
HX
1655static inline void *skb_gro_header_fast(struct sk_buff *skb,
1656 unsigned int offset)
86911732 1657{
a5b1cf28
HX
1658 return NAPI_GRO_CB(skb)->frag0 + offset;
1659}
78a478d0 1660
a5b1cf28
HX
1661static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1662{
1663 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1664}
78a478d0 1665
a5b1cf28
HX
1666static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1667 unsigned int offset)
1668{
17dd759c
HX
1669 if (!pskb_may_pull(skb, hlen))
1670 return NULL;
1671
a5b1cf28
HX
1672 NAPI_GRO_CB(skb)->frag0 = NULL;
1673 NAPI_GRO_CB(skb)->frag0_len = 0;
17dd759c 1674 return skb->data + offset;
86911732 1675}
1da177e4 1676
aa4b9f53
HX
1677static inline void *skb_gro_mac_header(struct sk_buff *skb)
1678{
78d3fd0b 1679 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
aa4b9f53
HX
1680}
1681
36e7b1b8
HX
1682static inline void *skb_gro_network_header(struct sk_buff *skb)
1683{
78d3fd0b
HX
1684 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1685 skb_network_offset(skb);
36e7b1b8
HX
1686}
1687
0c4e8581
SH
1688static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1689 unsigned short type,
3b04ddde
SH
1690 const void *daddr, const void *saddr,
1691 unsigned len)
0c4e8581 1692{
f1ecfd5d 1693 if (!dev->header_ops || !dev->header_ops->create)
0c4e8581 1694 return 0;
3b04ddde
SH
1695
1696 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
0c4e8581
SH
1697}
1698
b95cce35
SH
1699static inline int dev_parse_header(const struct sk_buff *skb,
1700 unsigned char *haddr)
1701{
1702 const struct net_device *dev = skb->dev;
1703
1b83336b 1704 if (!dev->header_ops || !dev->header_ops->parse)
b95cce35 1705 return 0;
3b04ddde 1706 return dev->header_ops->parse(skb, haddr);
b95cce35
SH
1707}
1708
1da177e4
LT
1709typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1710extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1711static inline int unregister_gifconf(unsigned int family)
1712{
1713 return register_gifconf(family, NULL);
1714}
1715
1716/*
88751275 1717 * Incoming packets are placed on per-cpu queues
1da177e4 1718 */
d94d9fee 1719struct softnet_data {
37437bb2 1720 struct Qdisc *output_queue;
a9cbd588 1721 struct Qdisc **output_queue_tailp;
1da177e4 1722 struct list_head poll_list;
1da177e4 1723 struct sk_buff *completion_queue;
6e7676c1 1724 struct sk_buff_head process_queue;
1da177e4 1725
dee42870 1726 /* stats */
cd7b5396
DM
1727 unsigned int processed;
1728 unsigned int time_squeeze;
1729 unsigned int cpu_collision;
1730 unsigned int received_rps;
dee42870 1731
fd793d89 1732#ifdef CONFIG_RPS
88751275
ED
1733 struct softnet_data *rps_ipi_list;
1734
1735 /* Elements below can be accessed between CPUs for RPS */
0a9627f2 1736 struct call_single_data csd ____cacheline_aligned_in_smp;
88751275
ED
1737 struct softnet_data *rps_ipi_next;
1738 unsigned int cpu;
fec5e652 1739 unsigned int input_queue_head;
76cc8b13 1740 unsigned int input_queue_tail;
1e94d72f 1741#endif
dee42870 1742 unsigned dropped;
0a9627f2 1743 struct sk_buff_head input_pkt_queue;
bea3348e 1744 struct napi_struct backlog;
1da177e4
LT
1745};
1746
76cc8b13 1747static inline void input_queue_head_incr(struct softnet_data *sd)
fec5e652
TH
1748{
1749#ifdef CONFIG_RPS
76cc8b13
TH
1750 sd->input_queue_head++;
1751#endif
1752}
1753
1754static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1755 unsigned int *qtail)
1756{
1757#ifdef CONFIG_RPS
1758 *qtail = ++sd->input_queue_tail;
fec5e652
TH
1759#endif
1760}
1761
0a9627f2 1762DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1da177e4 1763
37437bb2 1764extern void __netif_schedule(struct Qdisc *q);
1da177e4 1765
86d804e1 1766static inline void netif_schedule_queue(struct netdev_queue *txq)
1da177e4 1767{
73466498 1768 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
37437bb2 1769 __netif_schedule(txq->qdisc);
86d804e1
DM
1770}
1771
fd2ea0a7
DM
1772static inline void netif_tx_schedule_all(struct net_device *dev)
1773{
1774 unsigned int i;
1775
1776 for (i = 0; i < dev->num_tx_queues; i++)
1777 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1778}
1779
d29f749e
DJ
1780static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1781{
73466498 1782 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1783}
1784
bea3348e
SH
1785/**
1786 * netif_start_queue - allow transmit
1787 * @dev: network device
1788 *
1789 * Allow upper layers to call the device hard_start_xmit routine.
1790 */
1da177e4
LT
1791static inline void netif_start_queue(struct net_device *dev)
1792{
e8a0464c 1793 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1794}
1795
fd2ea0a7
DM
1796static inline void netif_tx_start_all_queues(struct net_device *dev)
1797{
1798 unsigned int i;
1799
1800 for (i = 0; i < dev->num_tx_queues; i++) {
1801 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1802 netif_tx_start_queue(txq);
1803 }
1804}
1805
79d16385 1806static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1da177e4
LT
1807{
1808#ifdef CONFIG_NETPOLL_TRAP
5f286e11 1809 if (netpoll_trap()) {
7b3d3e4f 1810 netif_tx_start_queue(dev_queue);
1da177e4 1811 return;
5f286e11 1812 }
1da177e4 1813#endif
73466498 1814 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
37437bb2 1815 __netif_schedule(dev_queue->qdisc);
79d16385
DM
1816}
1817
d29f749e
DJ
1818/**
1819 * netif_wake_queue - restart transmit
1820 * @dev: network device
1821 *
1822 * Allow upper layers to call the device hard_start_xmit routine.
1823 * Used for flow control when transmit resources are available.
1824 */
79d16385
DM
1825static inline void netif_wake_queue(struct net_device *dev)
1826{
e8a0464c 1827 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1828}
1829
fd2ea0a7
DM
1830static inline void netif_tx_wake_all_queues(struct net_device *dev)
1831{
1832 unsigned int i;
1833
1834 for (i = 0; i < dev->num_tx_queues; i++) {
1835 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1836 netif_tx_wake_queue(txq);
1837 }
1838}
1839
d29f749e
DJ
1840static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1841{
18543a64 1842 if (WARN_ON(!dev_queue)) {
256ee435 1843 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
18543a64
GC
1844 return;
1845 }
73466498 1846 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1847}
1848
bea3348e
SH
1849/**
1850 * netif_stop_queue - stop transmitted packets
1851 * @dev: network device
1852 *
1853 * Stop upper layers calling the device hard_start_xmit routine.
1854 * Used for flow control when transmit resources are unavailable.
1855 */
1da177e4
LT
1856static inline void netif_stop_queue(struct net_device *dev)
1857{
e8a0464c 1858 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1859}
1860
fd2ea0a7
DM
1861static inline void netif_tx_stop_all_queues(struct net_device *dev)
1862{
1863 unsigned int i;
1864
1865 for (i = 0; i < dev->num_tx_queues; i++) {
1866 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1867 netif_tx_stop_queue(txq);
1868 }
1869}
1870
d29f749e
DJ
1871static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1872{
73466498 1873 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
d29f749e
DJ
1874}
1875
bea3348e
SH
1876/**
1877 * netif_queue_stopped - test if transmit queue is flowblocked
1878 * @dev: network device
1879 *
1880 * Test if transmit queue on device is currently unable to send.
1881 */
1da177e4
LT
1882static inline int netif_queue_stopped(const struct net_device *dev)
1883{
e8a0464c 1884 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1da177e4
LT
1885}
1886
73466498 1887static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
c3f26a26 1888{
73466498
TH
1889 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
1890}
1891
1892static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
1893{
1894 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
1895}
1896
c5d67bd7
TH
1897static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
1898 unsigned int bytes)
1899{
114cf580
TH
1900#ifdef CONFIG_BQL
1901 dql_queued(&dev_queue->dql, bytes);
1902 if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
1903 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
1904 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
1905 clear_bit(__QUEUE_STATE_STACK_XOFF,
1906 &dev_queue->state);
1907 }
1908#endif
c5d67bd7
TH
1909}
1910
1911static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
1912{
1913 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
1914}
1915
1916static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
1917 unsigned pkts, unsigned bytes)
1918{
114cf580
TH
1919#ifdef CONFIG_BQL
1920 if (likely(bytes)) {
1921 dql_completed(&dev_queue->dql, bytes);
1922 if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
1923 &dev_queue->state) &&
1924 dql_avail(&dev_queue->dql) >= 0)) {
1925 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
1926 &dev_queue->state))
1927 netif_schedule_queue(dev_queue);
1928 }
1929 }
1930#endif
c5d67bd7
TH
1931}
1932
1933static inline void netdev_completed_queue(struct net_device *dev,
1934 unsigned pkts, unsigned bytes)
1935{
1936 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
1937}
1938
1939static inline void netdev_tx_reset_queue(struct netdev_queue *q)
1940{
114cf580
TH
1941#ifdef CONFIG_BQL
1942 dql_reset(&q->dql);
1943#endif
c5d67bd7
TH
1944}
1945
1946static inline void netdev_reset_queue(struct net_device *dev_queue)
1947{
1948 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
c3f26a26
DM
1949}
1950
bea3348e
SH
1951/**
1952 * netif_running - test if up
1953 * @dev: network device
1954 *
1955 * Test if the device has been brought up.
1956 */
1da177e4
LT
1957static inline int netif_running(const struct net_device *dev)
1958{
1959 return test_bit(__LINK_STATE_START, &dev->state);
1960}
1961
f25f4e44
PWJ
1962/*
1963 * Routines to manage the subqueues on a device. We only need start
1964 * stop, and a check if it's stopped. All other device management is
1965 * done at the overall netdevice level.
1966 * Also test the device if we're multiqueue.
1967 */
bea3348e
SH
1968
1969/**
1970 * netif_start_subqueue - allow sending packets on subqueue
1971 * @dev: network device
1972 * @queue_index: sub queue index
1973 *
1974 * Start individual transmit queue of a device with multiple transmit queues.
1975 */
f25f4e44
PWJ
1976static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1977{
fd2ea0a7 1978 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
7b3d3e4f
KK
1979
1980 netif_tx_start_queue(txq);
f25f4e44
PWJ
1981}
1982
bea3348e
SH
1983/**
1984 * netif_stop_subqueue - stop sending packets on subqueue
1985 * @dev: network device
1986 * @queue_index: sub queue index
1987 *
1988 * Stop individual transmit queue of a device with multiple transmit queues.
1989 */
f25f4e44
PWJ
1990static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1991{
fd2ea0a7 1992 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
f25f4e44
PWJ
1993#ifdef CONFIG_NETPOLL_TRAP
1994 if (netpoll_trap())
1995 return;
1996#endif
7b3d3e4f 1997 netif_tx_stop_queue(txq);
f25f4e44
PWJ
1998}
1999
bea3348e
SH
2000/**
2001 * netif_subqueue_stopped - test status of subqueue
2002 * @dev: network device
2003 * @queue_index: sub queue index
2004 *
2005 * Check individual transmit queue of a device with multiple transmit queues.
2006 */
668f895a 2007static inline int __netif_subqueue_stopped(const struct net_device *dev,
f25f4e44
PWJ
2008 u16 queue_index)
2009{
fd2ea0a7 2010 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
7b3d3e4f
KK
2011
2012 return netif_tx_queue_stopped(txq);
f25f4e44
PWJ
2013}
2014
668f895a
PE
2015static inline int netif_subqueue_stopped(const struct net_device *dev,
2016 struct sk_buff *skb)
2017{
2018 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2019}
bea3348e
SH
2020
2021/**
2022 * netif_wake_subqueue - allow sending packets on subqueue
2023 * @dev: network device
2024 * @queue_index: sub queue index
2025 *
2026 * Resume individual transmit queue of a device with multiple transmit queues.
2027 */
f25f4e44
PWJ
2028static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2029{
fd2ea0a7 2030 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
f25f4e44
PWJ
2031#ifdef CONFIG_NETPOLL_TRAP
2032 if (netpoll_trap())
2033 return;
2034#endif
73466498 2035 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
37437bb2 2036 __netif_schedule(txq->qdisc);
f25f4e44
PWJ
2037}
2038
a3d22a68
VZ
2039/*
2040 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2041 * as a distribution range limit for the returned value.
2042 */
2043static inline u16 skb_tx_hash(const struct net_device *dev,
2044 const struct sk_buff *skb)
2045{
2046 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2047}
2048
bea3348e
SH
2049/**
2050 * netif_is_multiqueue - test if device has multiple transmit queues
2051 * @dev: network device
2052 *
2053 * Check if device has multiple transmit queues
bea3348e 2054 */
f25f4e44
PWJ
2055static inline int netif_is_multiqueue(const struct net_device *dev)
2056{
a02cec21 2057 return dev->num_tx_queues > 1;
f25f4e44 2058}
1da177e4 2059
e6484930
TH
2060extern int netif_set_real_num_tx_queues(struct net_device *dev,
2061 unsigned int txq);
f0796d5c 2062
62fe0b40
BH
2063#ifdef CONFIG_RPS
2064extern int netif_set_real_num_rx_queues(struct net_device *dev,
2065 unsigned int rxq);
2066#else
2067static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2068 unsigned int rxq)
2069{
2070 return 0;
2071}
2072#endif
2073
3171d026
BH
2074static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2075 const struct net_device *from_dev)
2076{
2077 netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
2078#ifdef CONFIG_RPS
2079 return netif_set_real_num_rx_queues(to_dev,
2080 from_dev->real_num_rx_queues);
2081#else
2082 return 0;
2083#endif
2084}
2085
1da177e4 2086/* Use this variant when it is known for sure that it
0ef47309
ML
2087 * is executing from hardware interrupt context or with hardware interrupts
2088 * disabled.
1da177e4 2089 */
bea3348e 2090extern void dev_kfree_skb_irq(struct sk_buff *skb);
1da177e4
LT
2091
2092/* Use this variant in places where it could be invoked
0ef47309
ML
2093 * from either hardware interrupt or other context, with hardware interrupts
2094 * either disabled or enabled.
1da177e4 2095 */
56079431 2096extern void dev_kfree_skb_any(struct sk_buff *skb);
1da177e4 2097
1da177e4
LT
2098extern int netif_rx(struct sk_buff *skb);
2099extern int netif_rx_ni(struct sk_buff *skb);
1da177e4 2100extern int netif_receive_skb(struct sk_buff *skb);
5b252f0c 2101extern gro_result_t dev_gro_receive(struct napi_struct *napi,
96e93eab 2102 struct sk_buff *skb);
c7c4b3b6
BH
2103extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
2104extern gro_result_t napi_gro_receive(struct napi_struct *napi,
d565b0a1 2105 struct sk_buff *skb);
86cac58b 2106extern void napi_gro_flush(struct napi_struct *napi);
76620aaf 2107extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
c7c4b3b6 2108extern gro_result_t napi_frags_finish(struct napi_struct *napi,
5b252f0c
BH
2109 struct sk_buff *skb,
2110 gro_result_t ret);
76620aaf 2111extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
c7c4b3b6 2112extern gro_result_t napi_gro_frags(struct napi_struct *napi);
76620aaf
HX
2113
2114static inline void napi_free_frags(struct napi_struct *napi)
2115{
2116 kfree_skb(napi->skb);
2117 napi->skb = NULL;
2118}
2119
ab95bfe0 2120extern int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
2121 rx_handler_func_t *rx_handler,
2122 void *rx_handler_data);
ab95bfe0
JP
2123extern void netdev_rx_handler_unregister(struct net_device *dev);
2124
c2373ee9 2125extern int dev_valid_name(const char *name);
881d966b
EB
2126extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2127extern int dev_ethtool(struct net *net, struct ifreq *);
1da177e4 2128extern unsigned dev_get_flags(const struct net_device *);
bd380811 2129extern int __dev_change_flags(struct net_device *, unsigned int flags);
1da177e4 2130extern int dev_change_flags(struct net_device *, unsigned);
bd380811 2131extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
cf04a4c7 2132extern int dev_change_name(struct net_device *, const char *);
0b815a1a 2133extern int dev_set_alias(struct net_device *, const char *, size_t);
ce286d32
EB
2134extern int dev_change_net_namespace(struct net_device *,
2135 struct net *, const char *);
1da177e4 2136extern int dev_set_mtu(struct net_device *, int);
cbda10fa 2137extern void dev_set_group(struct net_device *, int);
1da177e4
LT
2138extern int dev_set_mac_address(struct net_device *,
2139 struct sockaddr *);
f6a78bfc 2140extern int dev_hard_start_xmit(struct sk_buff *skb,
fd2ea0a7
DM
2141 struct net_device *dev,
2142 struct netdev_queue *txq);
44540960
AB
2143extern int dev_forward_skb(struct net_device *dev,
2144 struct sk_buff *skb);
1da177e4 2145
20380731 2146extern int netdev_budget;
1da177e4
LT
2147
2148/* Called by rtnetlink.c:rtnl_unlock() */
2149extern void netdev_run_todo(void);
2150
bea3348e
SH
2151/**
2152 * dev_put - release reference to device
2153 * @dev: network device
2154 *
9ef4429b 2155 * Release reference to device to allow it to be freed.
bea3348e 2156 */
1da177e4
LT
2157static inline void dev_put(struct net_device *dev)
2158{
933393f5 2159 this_cpu_dec(*dev->pcpu_refcnt);
1da177e4
LT
2160}
2161
bea3348e
SH
2162/**
2163 * dev_hold - get reference to device
2164 * @dev: network device
2165 *
9ef4429b 2166 * Hold reference to device to keep it from being freed.
bea3348e 2167 */
15333061
SH
2168static inline void dev_hold(struct net_device *dev)
2169{
933393f5 2170 this_cpu_inc(*dev->pcpu_refcnt);
15333061 2171}
1da177e4
LT
2172
2173/* Carrier loss detection, dial on demand. The functions netif_carrier_on
2174 * and _off may be called from IRQ context, but it is caller
2175 * who is responsible for serialization of these calls.
b00055aa
SR
2176 *
2177 * The name carrier is inappropriate, these functions should really be
2178 * called netif_lowerlayer_*() because they represent the state of any
2179 * kind of lower layer not just hardware media.
1da177e4
LT
2180 */
2181
2182extern void linkwatch_fire_event(struct net_device *dev);
e014debe 2183extern void linkwatch_forget_dev(struct net_device *dev);
1da177e4 2184
bea3348e
SH
2185/**
2186 * netif_carrier_ok - test if carrier present
2187 * @dev: network device
2188 *
2189 * Check if carrier is present on device
2190 */
1da177e4
LT
2191static inline int netif_carrier_ok(const struct net_device *dev)
2192{
2193 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2194}
2195
9d21493b
ED
2196extern unsigned long dev_trans_start(struct net_device *dev);
2197
1da177e4
LT
2198extern void __netdev_watchdog_up(struct net_device *dev);
2199
0a242efc 2200extern void netif_carrier_on(struct net_device *dev);
1da177e4 2201
0a242efc 2202extern void netif_carrier_off(struct net_device *dev);
1da177e4 2203
06c4648d
IC
2204extern void netif_notify_peers(struct net_device *dev);
2205
bea3348e
SH
2206/**
2207 * netif_dormant_on - mark device as dormant.
2208 * @dev: network device
2209 *
2210 * Mark device as dormant (as per RFC2863).
2211 *
2212 * The dormant state indicates that the relevant interface is not
2213 * actually in a condition to pass packets (i.e., it is not 'up') but is
2214 * in a "pending" state, waiting for some external event. For "on-
2215 * demand" interfaces, this new state identifies the situation where the
2216 * interface is waiting for events to place it in the up state.
2217 *
2218 */
b00055aa
SR
2219static inline void netif_dormant_on(struct net_device *dev)
2220{
2221 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2222 linkwatch_fire_event(dev);
2223}
2224
bea3348e
SH
2225/**
2226 * netif_dormant_off - set device as not dormant.
2227 * @dev: network device
2228 *
2229 * Device is not in dormant state.
2230 */
b00055aa
SR
2231static inline void netif_dormant_off(struct net_device *dev)
2232{
2233 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2234 linkwatch_fire_event(dev);
2235}
2236
bea3348e
SH
2237/**
2238 * netif_dormant - test if carrier present
2239 * @dev: network device
2240 *
2241 * Check if carrier is present on device
2242 */
b00055aa
SR
2243static inline int netif_dormant(const struct net_device *dev)
2244{
2245 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2246}
2247
2248
bea3348e
SH
2249/**
2250 * netif_oper_up - test if device is operational
2251 * @dev: network device
2252 *
2253 * Check if carrier is operational
2254 */
d94d9fee
ED
2255static inline int netif_oper_up(const struct net_device *dev)
2256{
b00055aa
SR
2257 return (dev->operstate == IF_OPER_UP ||
2258 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2259}
2260
bea3348e
SH
2261/**
2262 * netif_device_present - is device available or removed
2263 * @dev: network device
2264 *
2265 * Check if device has not been removed from system.
2266 */
1da177e4
LT
2267static inline int netif_device_present(struct net_device *dev)
2268{
2269 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2270}
2271
56079431 2272extern void netif_device_detach(struct net_device *dev);
1da177e4 2273
56079431 2274extern void netif_device_attach(struct net_device *dev);
1da177e4
LT
2275
2276/*
2277 * Network interface message level settings
2278 */
1da177e4
LT
2279
2280enum {
2281 NETIF_MSG_DRV = 0x0001,
2282 NETIF_MSG_PROBE = 0x0002,
2283 NETIF_MSG_LINK = 0x0004,
2284 NETIF_MSG_TIMER = 0x0008,
2285 NETIF_MSG_IFDOWN = 0x0010,
2286 NETIF_MSG_IFUP = 0x0020,
2287 NETIF_MSG_RX_ERR = 0x0040,
2288 NETIF_MSG_TX_ERR = 0x0080,
2289 NETIF_MSG_TX_QUEUED = 0x0100,
2290 NETIF_MSG_INTR = 0x0200,
2291 NETIF_MSG_TX_DONE = 0x0400,
2292 NETIF_MSG_RX_STATUS = 0x0800,
2293 NETIF_MSG_PKTDATA = 0x1000,
2294 NETIF_MSG_HW = 0x2000,
2295 NETIF_MSG_WOL = 0x4000,
2296};
2297
2298#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2299#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2300#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2301#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2302#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2303#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2304#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2305#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2306#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2307#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2308#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2309#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2310#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2311#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2312#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2313
2314static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2315{
2316 /* use default */
2317 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2318 return default_msg_enable_bits;
2319 if (debug_value == 0) /* no output */
2320 return 0;
2321 /* set low N bits */
2322 return (1 << debug_value) - 1;
2323}
2324
c773e847 2325static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
932ff279 2326{
c773e847
DM
2327 spin_lock(&txq->_xmit_lock);
2328 txq->xmit_lock_owner = cpu;
22dd7495
JHS
2329}
2330
fd2ea0a7
DM
2331static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2332{
2333 spin_lock_bh(&txq->_xmit_lock);
2334 txq->xmit_lock_owner = smp_processor_id();
2335}
2336
c3f26a26
DM
2337static inline int __netif_tx_trylock(struct netdev_queue *txq)
2338{
2339 int ok = spin_trylock(&txq->_xmit_lock);
2340 if (likely(ok))
2341 txq->xmit_lock_owner = smp_processor_id();
2342 return ok;
2343}
2344
2345static inline void __netif_tx_unlock(struct netdev_queue *txq)
2346{
2347 txq->xmit_lock_owner = -1;
2348 spin_unlock(&txq->_xmit_lock);
2349}
2350
2351static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2352{
2353 txq->xmit_lock_owner = -1;
2354 spin_unlock_bh(&txq->_xmit_lock);
2355}
2356
08baf561
ED
2357static inline void txq_trans_update(struct netdev_queue *txq)
2358{
2359 if (txq->xmit_lock_owner != -1)
2360 txq->trans_start = jiffies;
2361}
2362
d29f749e
DJ
2363/**
2364 * netif_tx_lock - grab network device transmit lock
2365 * @dev: network device
d29f749e
DJ
2366 *
2367 * Get network device transmit lock
2368 */
22dd7495
JHS
2369static inline void netif_tx_lock(struct net_device *dev)
2370{
e8a0464c 2371 unsigned int i;
c3f26a26 2372 int cpu;
c773e847 2373
c3f26a26
DM
2374 spin_lock(&dev->tx_global_lock);
2375 cpu = smp_processor_id();
e8a0464c
DM
2376 for (i = 0; i < dev->num_tx_queues; i++) {
2377 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c3f26a26
DM
2378
2379 /* We are the only thread of execution doing a
2380 * freeze, but we have to grab the _xmit_lock in
2381 * order to synchronize with threads which are in
2382 * the ->hard_start_xmit() handler and already
2383 * checked the frozen bit.
2384 */
e8a0464c 2385 __netif_tx_lock(txq, cpu);
c3f26a26
DM
2386 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2387 __netif_tx_unlock(txq);
e8a0464c 2388 }
932ff279
HX
2389}
2390
2391static inline void netif_tx_lock_bh(struct net_device *dev)
2392{
e8a0464c
DM
2393 local_bh_disable();
2394 netif_tx_lock(dev);
932ff279
HX
2395}
2396
932ff279
HX
2397static inline void netif_tx_unlock(struct net_device *dev)
2398{
e8a0464c
DM
2399 unsigned int i;
2400
2401 for (i = 0; i < dev->num_tx_queues; i++) {
2402 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c773e847 2403
c3f26a26
DM
2404 /* No need to grab the _xmit_lock here. If the
2405 * queue is not stopped for another reason, we
2406 * force a schedule.
2407 */
2408 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
7b3d3e4f 2409 netif_schedule_queue(txq);
c3f26a26
DM
2410 }
2411 spin_unlock(&dev->tx_global_lock);
932ff279
HX
2412}
2413
2414static inline void netif_tx_unlock_bh(struct net_device *dev)
2415{
e8a0464c
DM
2416 netif_tx_unlock(dev);
2417 local_bh_enable();
932ff279
HX
2418}
2419
c773e847 2420#define HARD_TX_LOCK(dev, txq, cpu) { \
22dd7495 2421 if ((dev->features & NETIF_F_LLTX) == 0) { \
c773e847 2422 __netif_tx_lock(txq, cpu); \
22dd7495
JHS
2423 } \
2424}
2425
c773e847 2426#define HARD_TX_UNLOCK(dev, txq) { \
22dd7495 2427 if ((dev->features & NETIF_F_LLTX) == 0) { \
c773e847 2428 __netif_tx_unlock(txq); \
22dd7495
JHS
2429 } \
2430}
2431
1da177e4
LT
2432static inline void netif_tx_disable(struct net_device *dev)
2433{
fd2ea0a7 2434 unsigned int i;
c3f26a26 2435 int cpu;
fd2ea0a7 2436
c3f26a26
DM
2437 local_bh_disable();
2438 cpu = smp_processor_id();
fd2ea0a7
DM
2439 for (i = 0; i < dev->num_tx_queues; i++) {
2440 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
c3f26a26
DM
2441
2442 __netif_tx_lock(txq, cpu);
fd2ea0a7 2443 netif_tx_stop_queue(txq);
c3f26a26 2444 __netif_tx_unlock(txq);
fd2ea0a7 2445 }
c3f26a26 2446 local_bh_enable();
1da177e4
LT
2447}
2448
e308a5d8
DM
2449static inline void netif_addr_lock(struct net_device *dev)
2450{
2451 spin_lock(&dev->addr_list_lock);
2452}
2453
2429f7ac
JP
2454static inline void netif_addr_lock_nested(struct net_device *dev)
2455{
2456 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2457}
2458
e308a5d8
DM
2459static inline void netif_addr_lock_bh(struct net_device *dev)
2460{
2461 spin_lock_bh(&dev->addr_list_lock);
2462}
2463
2464static inline void netif_addr_unlock(struct net_device *dev)
2465{
2466 spin_unlock(&dev->addr_list_lock);
2467}
2468
2469static inline void netif_addr_unlock_bh(struct net_device *dev)
2470{
2471 spin_unlock_bh(&dev->addr_list_lock);
2472}
2473
f001fde5 2474/*
31278e71 2475 * dev_addrs walker. Should be used only for read access. Call with
f001fde5
JP
2476 * rcu_read_lock held.
2477 */
2478#define for_each_dev_addr(dev, ha) \
31278e71 2479 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
f001fde5 2480
1da177e4
LT
2481/* These functions live elsewhere (drivers/net/net_init.c, but related) */
2482
2483extern void ether_setup(struct net_device *dev);
2484
2485/* Support for loadable net-drivers */
36909ea4 2486extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
f25f4e44 2487 void (*setup)(struct net_device *),
36909ea4 2488 unsigned int txqs, unsigned int rxqs);
f25f4e44 2489#define alloc_netdev(sizeof_priv, name, setup) \
36909ea4
TH
2490 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2491
2492#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2493 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2494
1da177e4
LT
2495extern int register_netdev(struct net_device *dev);
2496extern void unregister_netdev(struct net_device *dev);
f001fde5 2497
22bedad3
JP
2498/* General hardware address lists handling functions */
2499extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2500 struct netdev_hw_addr_list *from_list,
2501 int addr_len, unsigned char addr_type);
2502extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2503 struct netdev_hw_addr_list *from_list,
2504 int addr_len, unsigned char addr_type);
2505extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2506 struct netdev_hw_addr_list *from_list,
2507 int addr_len);
2508extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2509 struct netdev_hw_addr_list *from_list,
2510 int addr_len);
2511extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2512extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2513
f001fde5
JP
2514/* Functions used for device addresses handling */
2515extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2516 unsigned char addr_type);
2517extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2518 unsigned char addr_type);
2519extern int dev_addr_add_multiple(struct net_device *to_dev,
2520 struct net_device *from_dev,
2521 unsigned char addr_type);
2522extern int dev_addr_del_multiple(struct net_device *to_dev,
2523 struct net_device *from_dev,
2524 unsigned char addr_type);
a748ee24
JP
2525extern void dev_addr_flush(struct net_device *dev);
2526extern int dev_addr_init(struct net_device *dev);
2527
2528/* Functions used for unicast addresses handling */
2529extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2530extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2531extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2532extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2533extern void dev_uc_flush(struct net_device *dev);
2534extern void dev_uc_init(struct net_device *dev);
f001fde5 2535
22bedad3
JP
2536/* Functions used for multicast addresses handling */
2537extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2538extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2539extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2540extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2541extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2542extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2543extern void dev_mc_flush(struct net_device *dev);
2544extern void dev_mc_init(struct net_device *dev);
f001fde5 2545
4417da66
PM
2546/* Functions used for secondary unicast and multicast support */
2547extern void dev_set_rx_mode(struct net_device *dev);
2548extern void __dev_set_rx_mode(struct net_device *dev);
dad9b335
WC
2549extern int dev_set_promiscuity(struct net_device *dev, int inc);
2550extern int dev_set_allmulti(struct net_device *dev, int inc);
1da177e4 2551extern void netdev_state_change(struct net_device *dev);
3ca5b404 2552extern int netdev_bonding_change(struct net_device *dev,
75c78500 2553 unsigned long event);
d8a33ac4 2554extern void netdev_features_change(struct net_device *dev);
1da177e4 2555/* Load a device via the kmod */
881d966b 2556extern void dev_load(struct net *net, const char *name);
1da177e4 2557extern void dev_mcast_init(void);
d7753516
BH
2558extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2559 struct rtnl_link_stats64 *storage);
eeda3fd6 2560
1da177e4 2561extern int netdev_max_backlog;
3b098e2d 2562extern int netdev_tstamp_prequeue;
1da177e4 2563extern int weight_p;
0a14842f 2564extern int bpf_jit_enable;
1da177e4 2565extern int netdev_set_master(struct net_device *dev, struct net_device *master);
1765a575
JP
2566extern int netdev_set_bond_master(struct net_device *dev,
2567 struct net_device *master);
84fa7933 2568extern int skb_checksum_help(struct sk_buff *skb);
c8f44aff
MM
2569extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2570 netdev_features_t features);
fb286bb2
HX
2571#ifdef CONFIG_BUG
2572extern void netdev_rx_csum_fault(struct net_device *dev);
2573#else
2574static inline void netdev_rx_csum_fault(struct net_device *dev)
2575{
2576}
2577#endif
1da177e4
LT
2578/* rx skb timestamps */
2579extern void net_enable_timestamp(void);
2580extern void net_disable_timestamp(void);
2581
20380731
ACM
2582#ifdef CONFIG_PROC_FS
2583extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2584extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2585extern void dev_seq_stop(struct seq_file *seq, void *v);
5cac98dd
AB
2586extern int dev_seq_open_ops(struct inode *inode, struct file *file,
2587 const struct seq_operations *ops);
20380731
ACM
2588#endif
2589
b8a9787e
JV
2590extern int netdev_class_create_file(struct class_attribute *class_attr);
2591extern void netdev_class_remove_file(struct class_attribute *class_attr);
2592
04600794
JB
2593extern struct kobj_ns_type_operations net_ns_type_operations;
2594
3019de12 2595extern const char *netdev_drivername(const struct net_device *dev);
6579e57b 2596
20380731
ACM
2597extern void linkwatch_run_queue(void);
2598
c8f44aff
MM
2599static inline netdev_features_t netdev_get_wanted_features(
2600 struct net_device *dev)
5455c699
MM
2601{
2602 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2603}
c8f44aff
MM
2604netdev_features_t netdev_increment_features(netdev_features_t all,
2605 netdev_features_t one, netdev_features_t mask);
6cb6a27c 2606int __netdev_update_features(struct net_device *dev);
5455c699 2607void netdev_update_features(struct net_device *dev);
afe12cc8 2608void netdev_change_features(struct net_device *dev);
7f353bf2 2609
fc4a7489
PM
2610void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2611 struct net_device *dev);
2612
c8f44aff 2613netdev_features_t netif_skb_features(struct sk_buff *skb);
58e998c6 2614
c8f44aff 2615static inline int net_gso_ok(netdev_features_t features, int gso_type)
576a30eb 2616{
c8f44aff 2617 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
0345e186
MM
2618
2619 /* check flags correspondence */
2620 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2621 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2622 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2623 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2624 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2625 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2626
d6b4991a 2627 return (features & feature) == feature;
576a30eb
HX
2628}
2629
c8f44aff 2630static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
bcd76111 2631{
278b2513 2632 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
21dc3301 2633 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
bcd76111
HX
2634}
2635
c8f44aff
MM
2636static inline int netif_needs_gso(struct sk_buff *skb,
2637 netdev_features_t features)
7967168c 2638{
fc741216
JG
2639 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2640 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
7967168c
HX
2641}
2642
82cc1a7a
PWJ
2643static inline void netif_set_gso_max_size(struct net_device *dev,
2644 unsigned int size)
2645{
2646 dev->gso_max_size = size;
2647}
2648
1765a575
JP
2649static inline int netif_is_bond_slave(struct net_device *dev)
2650{
2651 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2652}
2653
505d4f73 2654extern struct pernet_operations __net_initdata loopback_net_ops;
b1b67dd4 2655
571ba423
JP
2656/* Logging, debugging and troubleshooting/diagnostic helpers. */
2657
2658/* netdev_printk helpers, similar to dev_printk */
2659
2660static inline const char *netdev_name(const struct net_device *dev)
2661{
2662 if (dev->reg_state != NETREG_REGISTERED)
2663 return "(unregistered net_device)";
2664 return dev->name;
2665}
2666
ffa10cb4
JB
2667extern int __netdev_printk(const char *level, const struct net_device *dev,
2668 struct va_format *vaf);
2669
b9075fa9
JP
2670extern __printf(3, 4)
2671int netdev_printk(const char *level, const struct net_device *dev,
2672 const char *format, ...);
2673extern __printf(2, 3)
2674int netdev_emerg(const struct net_device *dev, const char *format, ...);
2675extern __printf(2, 3)
2676int netdev_alert(const struct net_device *dev, const char *format, ...);
2677extern __printf(2, 3)
2678int netdev_crit(const struct net_device *dev, const char *format, ...);
2679extern __printf(2, 3)
2680int netdev_err(const struct net_device *dev, const char *format, ...);
2681extern __printf(2, 3)
2682int netdev_warn(const struct net_device *dev, const char *format, ...);
2683extern __printf(2, 3)
2684int netdev_notice(const struct net_device *dev, const char *format, ...);
2685extern __printf(2, 3)
2686int netdev_info(const struct net_device *dev, const char *format, ...);
571ba423 2687
8909c9ad
VK
2688#define MODULE_ALIAS_NETDEV(device) \
2689 MODULE_ALIAS("netdev-" device)
2690
571ba423
JP
2691#if defined(DEBUG)
2692#define netdev_dbg(__dev, format, args...) \
2693 netdev_printk(KERN_DEBUG, __dev, format, ##args)
2694#elif defined(CONFIG_DYNAMIC_DEBUG)
2695#define netdev_dbg(__dev, format, args...) \
2696do { \
ffa10cb4 2697 dynamic_netdev_dbg(__dev, format, ##args); \
571ba423
JP
2698} while (0)
2699#else
2700#define netdev_dbg(__dev, format, args...) \
2701({ \
2702 if (0) \
2703 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2704 0; \
2705})
2706#endif
2707
2708#if defined(VERBOSE_DEBUG)
2709#define netdev_vdbg netdev_dbg
2710#else
2711
2712#define netdev_vdbg(dev, format, args...) \
2713({ \
2714 if (0) \
2715 netdev_printk(KERN_DEBUG, dev, format, ##args); \
2716 0; \
2717})
2718#endif
2719
2720/*
2721 * netdev_WARN() acts like dev_printk(), but with the key difference
2722 * of using a WARN/WARN_ON to get the message out, including the
2723 * file/line information and a backtrace.
2724 */
2725#define netdev_WARN(dev, format, args...) \
2726 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
2727
b3d95c5c
JP
2728/* netif printk helpers, similar to netdev_printk */
2729
2730#define netif_printk(priv, type, level, dev, fmt, args...) \
2731do { \
2732 if (netif_msg_##type(priv)) \
2733 netdev_printk(level, (dev), fmt, ##args); \
2734} while (0)
2735
f45f4321
JP
2736#define netif_level(level, priv, type, dev, fmt, args...) \
2737do { \
2738 if (netif_msg_##type(priv)) \
2739 netdev_##level(dev, fmt, ##args); \
2740} while (0)
2741
b3d95c5c 2742#define netif_emerg(priv, type, dev, fmt, args...) \
f45f4321 2743 netif_level(emerg, priv, type, dev, fmt, ##args)
b3d95c5c 2744#define netif_alert(priv, type, dev, fmt, args...) \
f45f4321 2745 netif_level(alert, priv, type, dev, fmt, ##args)
b3d95c5c 2746#define netif_crit(priv, type, dev, fmt, args...) \
f45f4321 2747 netif_level(crit, priv, type, dev, fmt, ##args)
b3d95c5c 2748#define netif_err(priv, type, dev, fmt, args...) \
f45f4321 2749 netif_level(err, priv, type, dev, fmt, ##args)
b3d95c5c 2750#define netif_warn(priv, type, dev, fmt, args...) \
f45f4321 2751 netif_level(warn, priv, type, dev, fmt, ##args)
b3d95c5c 2752#define netif_notice(priv, type, dev, fmt, args...) \
f45f4321 2753 netif_level(notice, priv, type, dev, fmt, ##args)
b3d95c5c 2754#define netif_info(priv, type, dev, fmt, args...) \
f45f4321 2755 netif_level(info, priv, type, dev, fmt, ##args)
b3d95c5c
JP
2756
2757#if defined(DEBUG)
2758#define netif_dbg(priv, type, dev, format, args...) \
2759 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2760#elif defined(CONFIG_DYNAMIC_DEBUG)
2761#define netif_dbg(priv, type, netdev, format, args...) \
2762do { \
2763 if (netif_msg_##type(priv)) \
b5fb0a03 2764 dynamic_netdev_dbg(netdev, format, ##args); \
b3d95c5c
JP
2765} while (0)
2766#else
2767#define netif_dbg(priv, type, dev, format, args...) \
2768({ \
2769 if (0) \
2770 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2771 0; \
2772})
2773#endif
2774
2775#if defined(VERBOSE_DEBUG)
bcfcc450 2776#define netif_vdbg netif_dbg
b3d95c5c
JP
2777#else
2778#define netif_vdbg(priv, type, dev, format, args...) \
2779({ \
2780 if (0) \
a4ed89cb 2781 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
b3d95c5c
JP
2782 0; \
2783})
2784#endif
571ba423 2785
1da177e4
LT
2786#endif /* __KERNEL__ */
2787
385a154c 2788#endif /* _LINUX_NETDEVICE_H */
This page took 1.320043 seconds and 5 git commands to generate.