Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Definitions for the Interfaces handler. | |
7 | * | |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
113aa838 | 14 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
15 | * Bjorn Ekwall. <bj0rn@blox.se> |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | * | |
23 | * Moved to /usr/include/linux for NET3 | |
24 | */ | |
25 | #ifndef _LINUX_NETDEVICE_H | |
26 | #define _LINUX_NETDEVICE_H | |
27 | ||
e8db0be1 | 28 | #include <linux/pm_qos.h> |
d7fe0f24 | 29 | #include <linux/timer.h> |
187f1882 | 30 | #include <linux/bug.h> |
bea3348e | 31 | #include <linux/delay.h> |
60063497 | 32 | #include <linux/atomic.h> |
1da177e4 LT |
33 | #include <asm/cache.h> |
34 | #include <asm/byteorder.h> | |
35 | ||
1da177e4 | 36 | #include <linux/percpu.h> |
4d5b78c0 | 37 | #include <linux/rculist.h> |
db217334 | 38 | #include <linux/dmaengine.h> |
bea3348e | 39 | #include <linux/workqueue.h> |
114cf580 | 40 | #include <linux/dynamic_queue_limits.h> |
1da177e4 | 41 | |
b1b67dd4 | 42 | #include <linux/ethtool.h> |
a050c33f | 43 | #include <net/net_namespace.h> |
cf85d08f | 44 | #include <net/dsa.h> |
7a6b6f51 | 45 | #ifdef CONFIG_DCB |
2f90b865 AD |
46 | #include <net/dcbnl.h> |
47 | #endif | |
5bc1421e | 48 | #include <net/netprio_cgroup.h> |
a050c33f | 49 | |
a59e2ecb | 50 | #include <linux/netdev_features.h> |
77162022 | 51 | #include <linux/neighbour.h> |
607ca46e | 52 | #include <uapi/linux/netdevice.h> |
a59e2ecb | 53 | |
115c1d6e | 54 | struct netpoll_info; |
313162d0 | 55 | struct device; |
c1f19b51 | 56 | struct phy_device; |
704232c2 JB |
57 | /* 802.11 specific */ |
58 | struct wireless_dev; | |
1da177e4 LT |
59 | /* source back-compat hooks */ |
60 | #define SET_ETHTOOL_OPS(netdev,ops) \ | |
61 | ( (netdev)->ethtool_ops = (ops) ) | |
62 | ||
f629d208 JP |
63 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
64 | const struct ethtool_ops *ops); | |
d07d7507 | 65 | |
c1f79426 SA |
66 | /* hardware address assignment types */ |
67 | #define NET_ADDR_PERM 0 /* address is permanent (default) */ | |
68 | #define NET_ADDR_RANDOM 1 /* address is generated randomly */ | |
69 | #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ | |
fbdeca2d JP |
70 | #define NET_ADDR_SET 3 /* address is set using |
71 | * dev_set_mac_address() */ | |
c1f79426 | 72 | |
9a1654ba JP |
73 | /* Backlog congestion levels */ |
74 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
75 | #define NET_RX_DROP 1 /* packet dropped */ | |
76 | ||
572a9d7b PM |
77 | /* |
78 | * Transmit return codes: transmit return codes originate from three different | |
79 | * namespaces: | |
80 | * | |
81 | * - qdisc return codes | |
82 | * - driver transmit return codes | |
83 | * - errno values | |
84 | * | |
85 | * Drivers are allowed to return any one of those in their hard_start_xmit() | |
86 | * function. Real network devices commonly used with qdiscs should only return | |
87 | * the driver transmit return codes though - when qdiscs are used, the actual | |
88 | * transmission happens asynchronously, so the value is not propagated to | |
89 | * higher layers. Virtual network devices transmit synchronously; in this case |
90 | * the driver transmit return codes are consumed by dev_queue_xmit(), and all |
91 | * others are propagated to higher layers. | |
92 | */ | |
93 | ||
94 | /* qdisc ->enqueue() return codes. */ | |
95 | #define NET_XMIT_SUCCESS 0x00 | |
9a1654ba JP |
96 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
97 | #define NET_XMIT_CN 0x02 /* congestion notification */ | |
98 | #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ | |
99 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ | |
1da177e4 | 100 | |
b9df3cb8 GR |
101 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
102 | * indicates that the device will soon be dropping packets, or already drops | |
103 | * some packets of the same priority; prompting us to send less aggressively. */ | |
572a9d7b | 104 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
1da177e4 LT |
105 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
106 | ||
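As a hedged illustration (not from this header) of how a caller collapses the qdisc return codes above into a 0/-errno style result, net_xmit_eval() treats NET_XMIT_CN as success; the function name below is hypothetical and assumes a .c file that includes <linux/netdevice.h>:

/* Sketch: queue an skb and report a congestion-tolerant status. */
static int example_queue_skb(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);	/* NET_XMIT_* or a negative errno */

	/* Congestion notification still counts as "sent" for the caller. */
	return net_xmit_eval(rc);
}
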
dc1f8bf6 | 107 | /* Driver transmit return codes */ |
9a1654ba | 108 | #define NETDEV_TX_MASK 0xf0 |
572a9d7b | 109 | |
dc1f8bf6 | 110 | enum netdev_tx { |
572a9d7b | 111 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
9a1654ba JP |
112 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
113 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ | |
114 | NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ | |
dc1f8bf6 SH |
115 | }; |
116 | typedef enum netdev_tx netdev_tx_t; | |
117 | ||
9a1654ba JP |
118 | /* |
119 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | |
120 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | |
121 | */ | |
122 | static inline bool dev_xmit_complete(int rc) | |
123 | { | |
124 | /* | |
125 | * Positive cases with an skb consumed by a driver: | |
126 | * - successful transmission (rc == NETDEV_TX_OK) | |
127 | * - error while transmitting (rc < 0) | |
128 | * - error while queueing to a different device (rc & NET_XMIT_MASK) | |
129 | */ | |
130 | if (likely(rc < NET_XMIT_MASK)) | |
131 | return true; | |
132 | ||
133 | return false; | |
134 | } | |
135 | ||
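A hedged sketch of how dev_xmit_complete() is meant to be used by code that invokes a driver's transmit hook directly; the caller keeps ownership of the skb only when the driver reports BUSY/LOCKED (the function name is hypothetical):

static void example_direct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return;			/* skb consumed: sent, dropped or error */

	/* NETDEV_TX_BUSY / NETDEV_TX_LOCKED: the skb is still ours. */
	kfree_skb(skb);			/* a real caller might requeue instead */
}
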
1da177e4 LT |
136 | /* |
137 | * Compute the worst case header length according to the protocols | |
138 | * used. | |
139 | */ | |
fe2918b0 | 140 | |
d11ead75 | 141 | #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) |
8388e3da DM |
142 | # if defined(CONFIG_MAC80211_MESH) |
143 | # define LL_MAX_HEADER 128 | |
144 | # else | |
145 | # define LL_MAX_HEADER 96 | |
146 | # endif | |
1da177e4 | 147 | #else |
8388e3da | 148 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
149 | #endif |
150 | ||
d11ead75 BH |
151 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
152 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) | |
1da177e4 LT |
153 | #define MAX_HEADER LL_MAX_HEADER |
154 | #else | |
155 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
156 | #endif | |
157 | ||
158 | /* | |
be1f3c2c BH |
159 | * Old network device statistics. Fields are native words |
160 | * (unsigned long) so they can be read and written atomically. | |
1da177e4 | 161 | */ |
fe2918b0 | 162 | |
d94d9fee | 163 | struct net_device_stats { |
3cfde79c BH |
164 | unsigned long rx_packets; |
165 | unsigned long tx_packets; | |
166 | unsigned long rx_bytes; | |
167 | unsigned long tx_bytes; | |
168 | unsigned long rx_errors; | |
169 | unsigned long tx_errors; | |
170 | unsigned long rx_dropped; | |
171 | unsigned long tx_dropped; | |
172 | unsigned long multicast; | |
1da177e4 | 173 | unsigned long collisions; |
1da177e4 | 174 | unsigned long rx_length_errors; |
3cfde79c BH |
175 | unsigned long rx_over_errors; |
176 | unsigned long rx_crc_errors; | |
177 | unsigned long rx_frame_errors; | |
178 | unsigned long rx_fifo_errors; | |
179 | unsigned long rx_missed_errors; | |
1da177e4 LT |
180 | unsigned long tx_aborted_errors; |
181 | unsigned long tx_carrier_errors; | |
182 | unsigned long tx_fifo_errors; | |
183 | unsigned long tx_heartbeat_errors; | |
184 | unsigned long tx_window_errors; | |
1da177e4 LT |
185 | unsigned long rx_compressed; |
186 | unsigned long tx_compressed; | |
187 | }; | |
188 | ||
1da177e4 LT |
189 | |
190 | #include <linux/cache.h> | |
191 | #include <linux/skbuff.h> | |
192 | ||
adc9300e | 193 | #ifdef CONFIG_RPS |
c5905afb IM |
194 | #include <linux/static_key.h> |
195 | extern struct static_key rps_needed; | |
adc9300e ED |
196 | #endif |
197 | ||
1da177e4 LT |
198 | struct neighbour; |
199 | struct neigh_parms; | |
200 | struct sk_buff; | |
201 | ||
f001fde5 JP |
202 | struct netdev_hw_addr { |
203 | struct list_head list; | |
204 | unsigned char addr[MAX_ADDR_LEN]; | |
205 | unsigned char type; | |
ccffad25 JP |
206 | #define NETDEV_HW_ADDR_T_LAN 1 |
207 | #define NETDEV_HW_ADDR_T_SAN 2 | |
208 | #define NETDEV_HW_ADDR_T_SLAVE 3 | |
209 | #define NETDEV_HW_ADDR_T_UNICAST 4 | |
22bedad3 | 210 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
22bedad3 | 211 | bool global_use; |
4cd729b0 | 212 | int sync_cnt; |
8f8f103d | 213 | int refcount; |
4543fbef | 214 | int synced; |
f001fde5 JP |
215 | struct rcu_head rcu_head; |
216 | }; | |
217 | ||
31278e71 JP |
218 | struct netdev_hw_addr_list { |
219 | struct list_head list; | |
220 | int count; | |
221 | }; | |
222 | ||
22bedad3 JP |
223 | #define netdev_hw_addr_list_count(l) ((l)->count) |
224 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | |
225 | #define netdev_hw_addr_list_for_each(ha, l) \ | |
226 | list_for_each_entry(ha, &(l)->list, list) | |
32e7bfc4 | 227 | |
22bedad3 JP |
228 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
229 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | |
230 | #define netdev_for_each_uc_addr(ha, dev) \ | |
231 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) | |
6683ece3 | 232 | |
22bedad3 JP |
233 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
234 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | |
18e225f2 | 235 | #define netdev_for_each_mc_addr(ha, dev) \ |
22bedad3 | 236 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
6683ece3 | 237 | |
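A hedged sketch of how a driver typically walks these lists from its ndo_set_rx_mode() callback; the hardware-filter helpers are hypothetical:

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev)) {
		example_hw_clear_mc_filter(dev);	/* hypothetical helper */
		return;
	}

	netdev_for_each_mc_addr(ha, dev)
		example_hw_add_mc_filter(dev, ha->addr);	/* hypothetical helper */
}
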
d94d9fee | 238 | struct hh_cache { |
f6b72b62 | 239 | u16 hh_len; |
5c25f686 | 240 | u16 __pad; |
3644f0ce | 241 | seqlock_t hh_lock; |
1da177e4 LT |
242 | |
243 | /* cached hardware header; allow for machine alignment needs. */ | |
244 | #define HH_DATA_MOD 16 | |
245 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 246 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
247 | #define HH_DATA_ALIGN(__len) \ |
248 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
249 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
250 | }; | |
251 | ||
252 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | |
253 | * Alternative is: | |
254 | * dev->hard_header_len ? (dev->hard_header_len + | |
255 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
256 | * | |
257 | * We could use other alignment values, but we must maintain the | |
258 | * relationship HH alignment <= LL alignment. | |
259 | */ | |
260 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 261 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 262 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 | 263 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 264 | |
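A hedged sketch of the intended use: a protocol that builds its own link-layer header reserves this much headroom when allocating an skb (error handling trimmed, function name hypothetical):

static struct sk_buff *example_alloc_tx_skb(struct net_device *dev,
					    unsigned int payload_len)
{
	unsigned int hlen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + hlen + dev->needed_tailroom, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);		/* aligned room for the hard header */
	return skb;
}
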
3b04ddde SH |
265 | struct header_ops { |
266 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
267 | unsigned short type, const void *daddr, | |
95c96174 | 268 | const void *saddr, unsigned int len); |
3b04ddde SH |
269 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); |
270 | int (*rebuild)(struct sk_buff *skb); | |
e69dd336 | 271 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); |
3b04ddde SH |
272 | void (*cache_update)(struct hh_cache *hh, |
273 | const struct net_device *dev, | |
274 | const unsigned char *haddr); | |
275 | }; | |
276 | ||
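For orientation, an Ethernet-like device wires this up roughly as in the hedged sketch below; eth_header() and eth_header_parse() are the generic helpers from <linux/etherdevice.h>, and whether .cache/.cache_update/.rebuild are needed depends on the device:

static const struct header_ops example_header_ops = {
	.create	= eth_header,		/* build the Ethernet header */
	.parse	= eth_header_parse,	/* extract the source address */
};

/* installed at setup time: dev->header_ops = &example_header_ops; */
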
1da177e4 LT |
277 | /* These flag bits are private to the generic network queueing |
278 | * layer, they may not be explicitly referenced by any other | |
279 | * code. | |
280 | */ | |
281 | ||
d94d9fee | 282 | enum netdev_state_t { |
1da177e4 LT |
283 | __LINK_STATE_START, |
284 | __LINK_STATE_PRESENT, | |
1da177e4 | 285 | __LINK_STATE_NOCARRIER, |
b00055aa SR |
286 | __LINK_STATE_LINKWATCH_PENDING, |
287 | __LINK_STATE_DORMANT, | |
1da177e4 LT |
288 | }; |
289 | ||
290 | ||
291 | /* | |
292 | * This structure holds netdevice settings configured at boot time. They |
fe2918b0 | 293 | * are then used in the device probing. |
1da177e4 LT |
294 | */ |
295 | struct netdev_boot_setup { | |
296 | char name[IFNAMSIZ]; | |
297 | struct ifmap map; | |
298 | }; | |
299 | #define NETDEV_BOOT_SETUP_MAX 8 | |
300 | ||
f629d208 | 301 | int __init netdev_boot_setup(char *str); |
1da177e4 | 302 | |
bea3348e SH |
303 | /* |
304 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
305 | */ | |
306 | struct napi_struct { | |
307 | /* The poll_list must only be managed by the entity which | |
308 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
309 | * whoever atomically sets that bit can add this napi_struct | |
310 | * to the per-cpu poll_list, and whoever clears that bit | |
311 | * can remove it from the list right before clearing the bit. |
312 | */ | |
313 | struct list_head poll_list; | |
314 | ||
315 | unsigned long state; | |
316 | int weight; | |
404f7c9e | 317 | unsigned int gro_count; |
bea3348e SH |
318 | int (*poll)(struct napi_struct *, int); |
319 | #ifdef CONFIG_NETPOLL | |
320 | spinlock_t poll_lock; | |
321 | int poll_owner; | |
bea3348e | 322 | #endif |
5d38a079 | 323 | struct net_device *dev; |
d565b0a1 | 324 | struct sk_buff *gro_list; |
5d38a079 | 325 | struct sk_buff *skb; |
404f7c9e | 326 | struct list_head dev_list; |
af12fa6e ET |
327 | struct hlist_node napi_hash_node; |
328 | unsigned int napi_id; | |
bea3348e SH |
329 | }; |
330 | ||
d94d9fee | 331 | enum { |
bea3348e | 332 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
a0a46196 | 333 | NAPI_STATE_DISABLE, /* Disable pending */ |
7b363e44 | 334 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
af12fa6e | 335 | NAPI_STATE_HASHED, /* In NAPI hash */ |
bea3348e SH |
336 | }; |
337 | ||
5b252f0c | 338 | enum gro_result { |
d1c76af9 HX |
339 | GRO_MERGED, |
340 | GRO_MERGED_FREE, | |
341 | GRO_HELD, | |
342 | GRO_NORMAL, | |
343 | GRO_DROP, | |
344 | }; | |
5b252f0c | 345 | typedef enum gro_result gro_result_t; |
d1c76af9 | 346 | |
8a4eb573 JP |
347 | /* |
348 | * enum rx_handler_result - Possible return values for rx_handlers. | |
349 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it | |
350 | * further. | |
351 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | |
352 | * case skb->dev was changed by rx_handler. | |
353 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | |
354 | * RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. |
355 | * | |
356 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | |
357 | * special processing of the skb, prior to delivery to protocol handlers. | |
358 | * | |
359 | * Currently, a net_device can only have a single rx_handler registered. Trying | |
360 | * to register a second rx_handler will return -EBUSY. | |
361 | * | |
362 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). | |
363 | * To unregister a rx_handler on a net_device, use | |
364 | * netdev_rx_handler_unregister(). | |
365 | * | |
366 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | |
367 | * do with the skb. | |
368 | * | |
369 | * If the rx_handler consumed the skb in some way, it should return |
370 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for |
371 | * the skb to be delivered in some other way. |
372 | * | |
373 | * If the rx_handler changed skb->dev, to divert the skb to another | |
374 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | |
375 | * new device will be called if it exists. | |
376 | * | |
377 | * If the rx_handler considers the skb should be ignored, it should return |
378 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that | |
d93cf068 | 379 | * are registered on the exact device (ptype->dev == skb->dev).
8a4eb573 JP |
380 | * |
381 | * If the rx_handler didn't change skb->dev, but wants the skb to be normally |
382 | * delivered, it should return RX_HANDLER_PASS. | |
383 | * | |
384 | * A device without a registered rx_handler will behave as if rx_handler | |
385 | * returned RX_HANDLER_PASS. | |
386 | */ | |
387 | ||
388 | enum rx_handler_result { | |
389 | RX_HANDLER_CONSUMED, | |
390 | RX_HANDLER_ANOTHER, | |
391 | RX_HANDLER_EXACT, | |
392 | RX_HANDLER_PASS, | |
393 | }; | |
394 | typedef enum rx_handler_result rx_handler_result_t; | |
395 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | |
ab95bfe0 | 396 | |
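A hedged sketch of a trivial rx_handler and its registration; a real handler would usually consume the skb or redirect skb->dev, while this one passes everything through:

static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* A real handler might set skb->dev and return RX_HANDLER_ANOTHER,
	 * or free/queue the skb and return RX_HANDLER_CONSUMED.
	 */
	(void)skb;
	return RX_HANDLER_PASS;
}

/* Registration is done under rtnl_lock():
 *	err = netdev_rx_handler_register(dev, example_rx_handler, priv_data);
 *	...
 *	netdev_rx_handler_unregister(dev);
 */
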
f629d208 | 397 | void __napi_schedule(struct napi_struct *n); |
bea3348e | 398 | |
4d29515f | 399 | static inline bool napi_disable_pending(struct napi_struct *n) |
a0a46196 DM |
400 | { |
401 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
402 | } | |
403 | ||
bea3348e SH |
404 | /** |
405 | * napi_schedule_prep - check if napi can be scheduled | |
406 | * @n: napi context | |
407 | * | |
408 | * Test if NAPI routine is already running, and if not mark | |
409 | * it as running. This is used as a condition variable | |
a0a46196 DM |
410 | * to ensure only one NAPI poll instance runs. We also make |
411 | * sure there is no pending NAPI disable. | |
bea3348e | 412 | */ |
4d29515f | 413 | static inline bool napi_schedule_prep(struct napi_struct *n) |
bea3348e | 414 | { |
a0a46196 DM |
415 | return !napi_disable_pending(n) && |
416 | !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | |
bea3348e SH |
417 | } |
418 | ||
419 | /** | |
420 | * napi_schedule - schedule NAPI poll | |
421 | * @n: napi context | |
422 | * | |
423 | * Schedule NAPI poll routine to be called if it is not already | |
424 | * running. | |
425 | */ | |
426 | static inline void napi_schedule(struct napi_struct *n) | |
427 | { | |
428 | if (napi_schedule_prep(n)) | |
429 | __napi_schedule(n); | |
430 | } | |
431 | ||
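A hedged sketch of the interrupt-handler half of NAPI (assumes <linux/interrupt.h> is included); the private structure and IRQ-masking helper are hypothetical:

struct example_priv {			/* hypothetical driver private data */
	struct napi_struct napi;
	/* device registers, rings, locks, ... */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		example_hw_mask_irqs(priv);	/* hypothetical: stop RX irqs */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
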
bfe13f54 | 432 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
4d29515f | 433 | static inline bool napi_reschedule(struct napi_struct *napi) |
bfe13f54 RD |
434 | { |
435 | if (napi_schedule_prep(napi)) { | |
436 | __napi_schedule(napi); | |
4d29515f | 437 | return true; |
bfe13f54 | 438 | } |
4d29515f | 439 | return false; |
bfe13f54 RD |
440 | } |
441 | ||
bea3348e SH |
442 | /** |
443 | * napi_complete - NAPI processing complete | |
444 | * @n: napi context | |
445 | * | |
446 | * Mark NAPI processing as complete. | |
447 | */ | |
f629d208 JP |
448 | void __napi_complete(struct napi_struct *n); |
449 | void napi_complete(struct napi_struct *n); | |
bea3348e | 450 | |
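And the matching hedged sketch of a poll callback, reusing the hypothetical example_priv from the interrupt sketch above; the RX-processing and IRQ-unmask helpers are likewise hypothetical:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	work_done = example_hw_rx(priv, budget);	/* hypothetical, <= budget */

	if (work_done < budget) {
		napi_complete(napi);
		example_hw_unmask_irqs(priv);	/* hypothetical: re-arm RX irqs */
	}
	return work_done;
}
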
af12fa6e ET |
451 | /** |
452 | * napi_by_id - lookup a NAPI by napi_id | |
453 | * @napi_id: hashed napi_id | |
454 | * | |
455 | * lookup @napi_id in napi_hash table | |
456 | * must be called under rcu_read_lock() | |
457 | */ | |
f629d208 | 458 | struct napi_struct *napi_by_id(unsigned int napi_id); |
af12fa6e ET |
459 | |
460 | /** | |
461 | * napi_hash_add - add a NAPI to global hashtable | |
462 | * @napi: napi context | |
463 | * | |
464 | * generate a new napi_id and store a @napi under it in napi_hash | |
465 | */ | |
f629d208 | 466 | void napi_hash_add(struct napi_struct *napi); |
af12fa6e ET |
467 | |
468 | /** | |
469 | * napi_hash_del - remove a NAPI from global table | |
470 | * @napi: napi context | |
471 | * | |
472 | * Warning: caller must observe rcu grace period | |
473 | * before freeing memory containing @napi | |
474 | */ | |
f629d208 | 475 | void napi_hash_del(struct napi_struct *napi); |
af12fa6e | 476 | |
bea3348e SH |
477 | /** |
478 | * napi_disable - prevent NAPI from scheduling | |
479 | * @n: napi context | |
480 | * | |
481 | * Stop NAPI from being scheduled on this context. | |
482 | * Waits till any outstanding processing completes. | |
483 | */ | |
484 | static inline void napi_disable(struct napi_struct *n) | |
485 | { | |
80c33ddd | 486 | might_sleep(); |
a0a46196 | 487 | set_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e | 488 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
43cc7380 | 489 | msleep(1); |
a0a46196 | 490 | clear_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e SH |
491 | } |
492 | ||
493 | /** | |
494 | * napi_enable - enable NAPI scheduling | |
495 | * @n: napi context | |
496 | * | |
497 | * Resume NAPI from being scheduled on this context. | |
498 | * Must be paired with napi_disable. | |
499 | */ | |
500 | static inline void napi_enable(struct napi_struct *n) | |
501 | { | |
502 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
503 | smp_mb__before_clear_bit(); | |
504 | clear_bit(NAPI_STATE_SCHED, &n->state); | |
505 | } | |
506 | ||
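A hedged sketch of the usual pairing in a driver's ndo_open/ndo_stop paths (netif_napi_add() is assumed to have been called at probe time; the hardware helpers are hypothetical):

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	example_hw_start(priv);			/* hypothetical */
	netif_start_queue(dev);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	example_hw_stop(priv);			/* hypothetical */
	napi_disable(&priv->napi);		/* waits for a running poll */
	return 0;
}
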
c264c3de SH |
507 | #ifdef CONFIG_SMP |
508 | /** | |
509 | * napi_synchronize - wait until NAPI is not running | |
510 | * @n: napi context | |
511 | * | |
512 | * Wait until NAPI is done being scheduled on this context. | |
513 | * Waits till any outstanding processing completes but | |
514 | * does not disable future activations. | |
515 | */ | |
516 | static inline void napi_synchronize(const struct napi_struct *n) | |
517 | { | |
518 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
519 | msleep(1); | |
520 | } | |
521 | #else | |
522 | # define napi_synchronize(n) barrier() | |
523 | #endif | |
524 | ||
d94d9fee | 525 | enum netdev_queue_state_t { |
73466498 TH |
526 | __QUEUE_STATE_DRV_XOFF, |
527 | __QUEUE_STATE_STACK_XOFF, | |
c3f26a26 | 528 | __QUEUE_STATE_FROZEN, |
73466498 TH |
529 | #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ |
530 | (1 << __QUEUE_STATE_STACK_XOFF)) | |
531 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ | |
532 | (1 << __QUEUE_STATE_FROZEN)) | |
79d16385 | 533 | }; |
73466498 TH |
534 | /* |
535 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The | |
536 | * netif_tx_* functions below are used to manipulate this flag. The | |
537 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit | |
538 | * queue independently. The netif_xmit_*stopped functions below are called | |
539 | * to check if the queue has been stopped by the driver or stack (either | |
540 | * of the XOFF bits are set in the state). Drivers should not need to call | |
541 | * netif_xmit*stopped functions, they should only be using netif_tx_*. | |
542 | */ | |
79d16385 | 543 | |
bb949fbd | 544 | struct netdev_queue { |
6a321cb3 ED |
545 | /* |
546 | * read mostly part | |
547 | */ | |
bb949fbd | 548 | struct net_device *dev; |
b0e1e646 DM |
549 | struct Qdisc *qdisc; |
550 | struct Qdisc *qdisc_sleeping; | |
ccf5ff69 | 551 | #ifdef CONFIG_SYSFS |
1d24eb48 TH |
552 | struct kobject kobj; |
553 | #endif | |
f2cd2d3e ED |
554 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
555 | int numa_node; | |
556 | #endif | |
6a321cb3 ED |
557 | /* |
558 | * write mostly part | |
559 | */ | |
560 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | |
561 | int xmit_lock_owner; | |
9d21493b ED |
562 | /* |
563 | * please use this field instead of dev->trans_start | |
564 | */ | |
565 | unsigned long trans_start; | |
ccf5ff69 | 566 | |
567 | /* | |
568 | * Number of TX timeouts for this queue | |
569 | * (/sys/class/net/DEV/Q/trans_timeout) | |
570 | */ | |
571 | unsigned long trans_timeout; | |
114cf580 TH |
572 | |
573 | unsigned long state; | |
574 | ||
575 | #ifdef CONFIG_BQL | |
576 | struct dql dql; | |
577 | #endif | |
e8a0464c | 578 | } ____cacheline_aligned_in_smp; |
bb949fbd | 579 | |
f2cd2d3e ED |
580 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
581 | { | |
582 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
583 | return q->numa_node; | |
584 | #else | |
b236da69 | 585 | return NUMA_NO_NODE; |
f2cd2d3e ED |
586 | #endif |
587 | } | |
588 | ||
589 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | |
590 | { | |
591 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
592 | q->numa_node = node; | |
593 | #endif | |
594 | } | |
595 | ||
df334545 | 596 | #ifdef CONFIG_RPS |
0a9627f2 TH |
597 | /* |
598 | * This structure holds an RPS map which can be of variable length. The | |
599 | * map is an array of CPUs. | |
600 | */ | |
601 | struct rps_map { | |
602 | unsigned int len; | |
603 | struct rcu_head rcu; | |
604 | u16 cpus[0]; | |
605 | }; | |
60b778ce | 606 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
0a9627f2 | 607 | |
fec5e652 | 608 | /* |
c445477d BH |
609 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
610 | * tail pointer for that CPU's input queue at the time of last enqueue, and | |
611 | * a hardware filter index. | |
fec5e652 TH |
612 | */ |
613 | struct rps_dev_flow { | |
614 | u16 cpu; | |
c445477d | 615 | u16 filter; |
fec5e652 TH |
616 | unsigned int last_qtail; |
617 | }; | |
c445477d | 618 | #define RPS_NO_FILTER 0xffff |
fec5e652 TH |
619 | |
620 | /* | |
621 | * The rps_dev_flow_table structure contains a table of flow mappings. | |
622 | */ | |
623 | struct rps_dev_flow_table { | |
624 | unsigned int mask; | |
625 | struct rcu_head rcu; | |
fec5e652 TH |
626 | struct rps_dev_flow flows[0]; |
627 | }; | |
628 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | |
60b778ce | 629 | ((_num) * sizeof(struct rps_dev_flow))) |
fec5e652 TH |
630 | |
631 | /* | |
632 | * The rps_sock_flow_table contains mappings of flows to the last CPU | |
633 | * on which they were processed by the application (set in recvmsg). | |
634 | */ | |
635 | struct rps_sock_flow_table { | |
636 | unsigned int mask; | |
637 | u16 ents[0]; | |
638 | }; | |
639 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ | |
60b778ce | 640 | ((_num) * sizeof(u16))) |
fec5e652 TH |
641 | |
642 | #define RPS_NO_CPU 0xffff | |
643 | ||
644 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, | |
645 | u32 hash) | |
646 | { | |
647 | if (table && hash) { | |
648 | unsigned int cpu, index = hash & table->mask; | |
649 | ||
650 | /* We only give a hint, preemption can change cpu under us */ | |
651 | cpu = raw_smp_processor_id(); | |
652 | ||
653 | if (table->ents[index] != cpu) | |
654 | table->ents[index] = cpu; | |
655 | } | |
656 | } | |
657 | ||
658 | static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, | |
659 | u32 hash) | |
660 | { | |
661 | if (table && hash) | |
662 | table->ents[hash & table->mask] = RPS_NO_CPU; | |
663 | } | |
664 | ||
6e3f7faf | 665 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; |
fec5e652 | 666 | |
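A hedged sketch of how the stack records a flow at recvmsg time; this mirrors the sock helpers rather than copying them, and only builds under CONFIG_RPS:

static inline void example_record_flow(u32 hash)
{
	struct rps_sock_flow_table *sft;

	rcu_read_lock();
	sft = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sft, hash);	/* no-op if table or hash is 0 */
	rcu_read_unlock();
}
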
c445477d | 667 | #ifdef CONFIG_RFS_ACCEL |
f629d208 JP |
668 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, |
669 | u16 filter_id); | |
c445477d | 670 | #endif |
a953be53 | 671 | #endif /* CONFIG_RPS */ |
c445477d | 672 | |
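A hedged sketch of a driver-side filter-expiry scan built on rps_may_expire_flow(); CONFIG_RFS_ACCEL is assumed and the per-filter bookkeeping is hypothetical:

struct example_filter {		/* hypothetical hardware-filter bookkeeping */
	bool	in_use;
	u16	rxq_index;
	u32	flow_id;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *filters,
				   u16 n_filters)
{
	u16 i;

	for (i = 0; i < n_filters; i++) {
		struct example_filter *f = &filters[i];

		if (!f->in_use)
			continue;
		if (rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i))
			example_hw_del_filter(dev, f);	/* hypothetical */
	}
}
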
0a9627f2 TH |
673 | /* This structure contains an instance of an RX queue. */ |
674 | struct netdev_rx_queue { | |
a953be53 | 675 | #ifdef CONFIG_RPS |
6e3f7faf ED |
676 | struct rps_map __rcu *rps_map; |
677 | struct rps_dev_flow_table __rcu *rps_flow_table; | |
a953be53 | 678 | #endif |
6e3f7faf | 679 | struct kobject kobj; |
fe822240 | 680 | struct net_device *dev; |
0a9627f2 | 681 | } ____cacheline_aligned_in_smp; |
a953be53 MD |
682 | |
683 | /* | |
684 | * RX queue sysfs structures and functions. | |
685 | */ | |
686 | struct rx_queue_attribute { | |
687 | struct attribute attr; | |
688 | ssize_t (*show)(struct netdev_rx_queue *queue, | |
689 | struct rx_queue_attribute *attr, char *buf); | |
690 | ssize_t (*store)(struct netdev_rx_queue *queue, | |
691 | struct rx_queue_attribute *attr, const char *buf, size_t len); | |
692 | }; | |
d314774c | 693 | |
bf264145 TH |
694 | #ifdef CONFIG_XPS |
695 | /* | |
696 | * This structure holds an XPS map which can be of variable length. The | |
697 | * map is an array of queues. | |
698 | */ | |
699 | struct xps_map { | |
700 | unsigned int len; | |
701 | unsigned int alloc_len; | |
702 | struct rcu_head rcu; | |
703 | u16 queues[0]; | |
704 | }; | |
60b778ce | 705 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
bf264145 TH |
706 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ |
707 | / sizeof(u16)) | |
708 | ||
709 | /* | |
710 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | |
711 | */ | |
712 | struct xps_dev_maps { | |
713 | struct rcu_head rcu; | |
a4177869 | 714 | struct xps_map __rcu *cpu_map[0]; |
bf264145 TH |
715 | }; |
716 | #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ | |
717 | (nr_cpu_ids * sizeof(struct xps_map *))) | |
718 | #endif /* CONFIG_XPS */ | |
719 | ||
4f57c087 JF |
720 | #define TC_MAX_QUEUE 16 |
721 | #define TC_BITMASK 15 | |
722 | /* HW offloaded queuing disciplines txq count and offset maps */ | |
723 | struct netdev_tc_txq { | |
724 | u16 count; | |
725 | u16 offset; | |
726 | }; | |
727 | ||
68bad94e NP |
728 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
729 | /* | |
730 | * This structure is to hold information about the device | |
731 | * configured to run FCoE protocol stack. | |
732 | */ | |
733 | struct netdev_fcoe_hbainfo { | |
734 | char manufacturer[64]; | |
735 | char serial_number[64]; | |
736 | char hardware_version[64]; | |
737 | char driver_version[64]; | |
738 | char optionrom_version[64]; | |
739 | char firmware_version[64]; | |
740 | char model[256]; | |
741 | char model_description[256]; | |
742 | }; | |
743 | #endif | |
744 | ||
66b52b0d JP |
745 | #define MAX_PHYS_PORT_ID_LEN 32 |
746 | ||
747 | /* This structure holds a unique identifier to identify the | |
748 | * physical port used by a netdevice. | |
749 | */ | |
750 | struct netdev_phys_port_id { | |
751 | unsigned char id[MAX_PHYS_PORT_ID_LEN]; | |
752 | unsigned char id_len; | |
753 | }; | |
754 | ||
99932d4f DB |
755 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, |
756 | struct sk_buff *skb); | |
757 | ||
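A hedged sketch of an ndo_select_queue() implementation that pins control traffic to queue 0 and defers everything else to the provided fallback; the policy is purely illustrative and TC_PRIO_CONTROL comes from <linux/pkt_sched.h>:

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	if (skb->priority == TC_PRIO_CONTROL)	/* illustrative policy */
		return 0;

	return fallback(dev, skb);
}
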
d314774c SH |
758 | /* |
759 | * This structure defines the management hooks for network devices. | |
00829823 SH |
760 | * The following hooks can be defined; unless noted otherwise, they are |
761 | * optional and can be filled with a null pointer. | |
d314774c SH |
762 | * |
763 | * int (*ndo_init)(struct net_device *dev); | |
764 | * This function is called once when network device is registered. | |
765 | * The network device can use this for any late stage initialization |
766 | * or semantic validation. It can fail with an error code which will |
767 | * be propagated back to register_netdev. |
768 | * | |
769 | * void (*ndo_uninit)(struct net_device *dev); | |
770 | * This function is called when device is unregistered or when registration | |
771 | * fails. It is not called if init fails. | |
772 | * | |
773 | * int (*ndo_open)(struct net_device *dev); | |
774 | * This function is called when the network device transitions to the up |
775 | * state. | |
776 | * | |
777 | * int (*ndo_stop)(struct net_device *dev); | |
778 | * This function is called when the network device transitions to the down |
779 | * state. | |
780 | * | |
dc1f8bf6 SH |
781 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
782 | * struct net_device *dev); | |
00829823 | 783 | * Called when a packet needs to be transmitted. |
dc1f8bf6 SH |
784 | * Must return NETDEV_TX_OK or NETDEV_TX_BUSY. |
785 | * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | |
00829823 SH |
786 | * Required; cannot be NULL. |
787 | * | |
f663dd9a | 788 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
99932d4f | 789 | * void *accel_priv, select_queue_fallback_t fallback); |
00829823 SH |
790 | * Called to decide which queue to use when the device supports multiple |
791 | * transmit queues. | |
792 | * | |
d314774c SH |
793 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
794 | * This function is called to allow the device receiver to make |
795 | * changes to its configuration when multicast or promiscuous mode is enabled. |
796 | * | |
797 | * void (*ndo_set_rx_mode)(struct net_device *dev); | |
798 | * This function is called when the device changes its address list filtering. |
01789349 JP |
799 | * If driver handles unicast address filtering, it should set |
800 | * IFF_UNICAST_FLT to its priv_flags. | |
d314774c SH |
801 | * |
802 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | |
803 | * This function is called when the Media Access Control address | |
37b607c5 | 804 | * needs to be changed. If this interface is not defined, the |
d314774c SH |
805 | * MAC address cannot be changed. |
806 | * | |
807 | * int (*ndo_validate_addr)(struct net_device *dev); | |
808 | * Test if Media Access Control address is valid for the device. | |
809 | * | |
810 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | |
811 | * Called when a user requests an ioctl which can't be handled by |
812 | * the generic interface code. If not defined, ioctls return a |
813 | * not-supported error code. |
814 | * | |
815 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | |
816 | * Used to set network device bus interface parameters. This interface |
817 | * is retained for legacy reasons; new devices should use the bus |
818 | * interface (PCI) for low level management. | |
819 | * | |
820 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | |
821 | * Called when a user wants to change the Maximum Transfer Unit | |
822 | * of a device. If not defined, any request to change MTU will | |
823 | * return an error. |
824 | * | |
00829823 | 825 | * void (*ndo_tx_timeout)(struct net_device *dev); |
d314774c SH |
826 | * Callback used when the transmitter has not made any progress |
827 | * for dev->watchdog ticks. | |
828 | * | |
3cfde79c | 829 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
28172739 | 830 | * struct rtnl_link_stats64 *storage); |
d308e38f | 831 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
d314774c | 832 | * Called when a user wants to get the network device usage |
be1f3c2c | 833 | * statistics. Drivers must do one of the following: |
3cfde79c BH |
834 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
835 | * rtnl_link_stats64 structure passed by the caller. | |
82695d9b | 836 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
be1f3c2c BH |
837 | * (which should normally be dev->stats) and return a pointer to |
838 | * it. The structure may be changed asynchronously only if each | |
839 | * field is written atomically. | |
840 | * 3. Update dev->stats asynchronously and atomically, and define | |
841 | * neither operation. | |
d314774c | 842 | * |
80d5c368 PM |
843 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
844 | * If the device supports VLAN filtering this function is called when a |
845 | * VLAN id is registered. | |
d314774c | 846 | * |
8e586137 | 847 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
80d5c368 PM |
848 | * If the device supports VLAN filtering this function is called when a |
849 | * VLAN id is unregistered. | |
d314774c SH |
850 | * |
851 | * void (*ndo_poll_controller)(struct net_device *dev); | |
95c26df8 WM |
852 | * |
853 | * SR-IOV management functions. | |
854 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | |
855 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | |
856 | * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); | |
5f8444a3 | 857 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
95c26df8 WM |
858 | * int (*ndo_get_vf_config)(struct net_device *dev, |
859 | * int vf, struct ifla_vf_info *ivf); | |
1d8faf48 | 860 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
57b61080 SF |
861 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
862 | * struct nlattr *port[]); | |
863 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); | |
4f57c087 JF |
864 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) |
865 | * Called to setup 'tc' number of traffic classes in the net device. This | |
866 | * is always called from the stack with the rtnl lock held and netif tx | |
867 | * queues stopped. This allows the netdevice to perform queue management | |
868 | * safely. | |
c445477d | 869 | * |
e9bce845 YZ |
870 | * Fiber Channel over Ethernet (FCoE) offload functions. |
871 | * int (*ndo_fcoe_enable)(struct net_device *dev); | |
872 | * Called when the FCoE protocol stack wants to start using LLD for FCoE | |
873 | * so the underlying device can perform whatever needed configuration or | |
874 | * initialization to support acceleration of FCoE traffic. | |
875 | * | |
876 | * int (*ndo_fcoe_disable)(struct net_device *dev); | |
877 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE | |
878 | * so the underlying device can perform whatever needed clean-ups to | |
879 | * stop supporting acceleration of FCoE traffic. | |
880 | * | |
881 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | |
882 | * struct scatterlist *sgl, unsigned int sgc); | |
883 | * Called when the FCoE Initiator wants to initialize an I/O that | |
884 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
885 | * perform necessary setup and returns 1 to indicate the device is set up | |
886 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
887 | * | |
888 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); | |
889 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as | |
890 | * indicated by the FC exchange id 'xid', so the underlying device can | |
891 | * clean up and reuse resources for later DDP requests. | |
892 | * | |
893 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | |
894 | * struct scatterlist *sgl, unsigned int sgc); | |
895 | * Called when the FCoE Target wants to initialize an I/O that | |
896 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
897 | * perform necessary setup and returns 1 to indicate the device is set up | |
898 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
899 | * | |
68bad94e NP |
900 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
901 | * struct netdev_fcoe_hbainfo *hbainfo); | |
902 | * Called when the FCoE Protocol stack wants information on the underlying | |
903 | * device. This information is utilized by the FCoE protocol stack to | |
904 | * register attributes with Fiber Channel management service as per the | |
905 | * FC-GS Fabric Device Management Information (FDMI) specification. |
906 | * | |
e9bce845 YZ |
907 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
908 | * Called when the underlying device wants to override default World Wide | |
909 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | |
910 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | |
911 | * protocol stack to use. | |
912 | * | |
c445477d BH |
913 | * RFS acceleration. |
914 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | |
915 | * u16 rxq_index, u32 flow_id); | |
916 | * Set hardware filter for RFS. rxq_index is the target queue index; | |
917 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. | |
918 | * Return the filter ID on success, or a negative error code. | |
fbaec0ea | 919 | * |
8b98a70c | 920 | * Slave management functions (for bridge, bonding, etc). |
fbaec0ea JP |
921 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); |
922 | * Called to make another netdev an underling. | |
923 | * | |
924 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | |
925 | * Called to release previously enslaved netdev. | |
5455c699 MM |
926 | * |
927 | * Feature/offload setting functions. | |
c8f44aff MM |
928 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
929 | * netdev_features_t features); | |
5455c699 MM |
930 | * Adjusts the requested feature flags according to device-specific |
931 | * constraints, and returns the resulting flags. Must not modify | |
932 | * the device state. | |
933 | * | |
c8f44aff | 934 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
5455c699 MM |
935 | * Called to update device configuration to new features. Passed |
936 | * feature set might be less than what was returned by ndo_fix_features(). |
937 | * Must return >0 or -errno if it changed dev->features itself. | |
938 | * | |
edc7d573 | 939 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
940 | * struct net_device *dev, | |
6b6e2725 | 941 | * const unsigned char *addr, u16 flags) |
77162022 | 942 | * Adds an FDB entry to dev for addr. |
1690be63 VY |
943 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
944 | * struct net_device *dev, | |
6b6e2725 | 945 | * const unsigned char *addr) |
77162022 JF |
946 | * Deletes the FDB entry from dev corresponding to addr. |
947 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | |
948 | * struct net_device *dev, int idx) | |
949 | * Used to add FDB entries to dump requests. Implementers should add | |
950 | * entries to skb and update idx with the number of entries. | |
e5a55a89 JF |
951 | * |
952 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | |
953 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | |
24f11a5c | 954 | * struct net_device *dev, u32 filter_mask) |
4bf84c35 JP |
955 | * |
956 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | |
957 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | |
958 | * which do not represent real hardware may define this to allow their | |
959 | * userspace components to manage their virtual carrier state. Devices | |
960 | * that determine carrier state from physical hardware properties (eg | |
961 | * network cables) or protocol-dependent mechanisms (eg | |
962 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | |
66b52b0d JP |
963 | * |
964 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | |
965 | * struct netdev_phys_port_id *ppid); | |
966 | * Called to get the ID of the physical port of this device. If the driver does |
967 | * not implement this, it is assumed that the hw is not able to have |
968 | * multiple net devices on a single physical port. |
53cf5275 JG |
969 | * |
970 | * void (*ndo_add_vxlan_port)(struct net_device *dev, | |
35e42379 | 971 | * sa_family_t sa_family, __be16 port); |
53cf5275 JG |
972 | * Called by vxlan to notify a driver about the UDP port and socket |
973 | * address family that vxlan is listening to. It is called only when |
974 | * a new port starts listening. The operation is protected by the | |
975 | * vxlan_net->sock_lock. | |
976 | * | |
977 | * void (*ndo_del_vxlan_port)(struct net_device *dev, | |
35e42379 | 978 | * sa_family_t sa_family, __be16 port); |
53cf5275 JG |
979 | * Called by vxlan to notify the driver about a UDP port and socket |
980 | * address family that vxlan is not listening to anymore. The operation | |
981 | * is protected by the vxlan_net->sock_lock. | |
a6cc0cfa JF |
982 | * |
983 | * void* (*ndo_dfwd_add_station)(struct net_device *pdev, | |
984 | * struct net_device *dev) | |
985 | * Called by upper layer devices to accelerate switching or other | |
986 | * station functionality into hardware. 'pdev' is the lowerdev |
987 | * to use for the offload and 'dev' is the net device that will | |
988 | * back the offload. Returns a pointer to the private structure | |
989 | * the upper layer will maintain. | |
990 | * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) | |
991 | * Called by upper layer device to delete the station created | |
992 | * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing | |
993 | * the station and priv is the structure returned by the add | |
994 | * operation. | |
995 | * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb, | |
996 | * struct net_device *dev, | |
997 | * void *priv); | |
998 | * Callback to use for xmit over the accelerated station. This | |
999 | * is used in place of ndo_start_xmit on accelerated net | |
1000 | * devices. | |
d314774c SH |
1001 | */ |
1002 | struct net_device_ops { | |
1003 | int (*ndo_init)(struct net_device *dev); | |
1004 | void (*ndo_uninit)(struct net_device *dev); | |
1005 | int (*ndo_open)(struct net_device *dev); | |
1006 | int (*ndo_stop)(struct net_device *dev); | |
dc1f8bf6 | 1007 | netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, |
00829823 SH |
1008 | struct net_device *dev); |
1009 | u16 (*ndo_select_queue)(struct net_device *dev, | |
f663dd9a | 1010 | struct sk_buff *skb, |
99932d4f DB |
1011 | void *accel_priv, |
1012 | select_queue_fallback_t fallback); | |
d314774c SH |
1013 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1014 | int flags); | |
d314774c | 1015 | void (*ndo_set_rx_mode)(struct net_device *dev); |
d314774c SH |
1016 | int (*ndo_set_mac_address)(struct net_device *dev, |
1017 | void *addr); | |
d314774c | 1018 | int (*ndo_validate_addr)(struct net_device *dev); |
d314774c SH |
1019 | int (*ndo_do_ioctl)(struct net_device *dev, |
1020 | struct ifreq *ifr, int cmd); | |
d314774c SH |
1021 | int (*ndo_set_config)(struct net_device *dev, |
1022 | struct ifmap *map); | |
00829823 SH |
1023 | int (*ndo_change_mtu)(struct net_device *dev, |
1024 | int new_mtu); | |
1025 | int (*ndo_neigh_setup)(struct net_device *dev, | |
1026 | struct neigh_parms *); | |
d314774c SH |
1027 | void (*ndo_tx_timeout) (struct net_device *dev); |
1028 | ||
28172739 ED |
1029 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
1030 | struct rtnl_link_stats64 *storage); | |
d314774c SH |
1031 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1032 | ||
8e586137 | 1033 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
80d5c368 | 1034 | __be16 proto, u16 vid); |
8e586137 | 1035 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
80d5c368 | 1036 | __be16 proto, u16 vid); |
d314774c | 1037 | #ifdef CONFIG_NET_POLL_CONTROLLER |
d314774c | 1038 | void (*ndo_poll_controller)(struct net_device *dev); |
4247e161 | 1039 | int (*ndo_netpoll_setup)(struct net_device *dev, |
47be03a2 AW |
1040 | struct netpoll_info *info, |
1041 | gfp_t gfp); | |
0e34e931 | 1042 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
06021292 | 1043 | #endif |
e0d1095a | 1044 | #ifdef CONFIG_NET_RX_BUSY_POLL |
8b80cda5 | 1045 | int (*ndo_busy_poll)(struct napi_struct *dev); |
d314774c | 1046 | #endif |
95c26df8 WM |
1047 | int (*ndo_set_vf_mac)(struct net_device *dev, |
1048 | int queue, u8 *mac); | |
1049 | int (*ndo_set_vf_vlan)(struct net_device *dev, | |
1050 | int queue, u16 vlan, u8 qos); | |
1051 | int (*ndo_set_vf_tx_rate)(struct net_device *dev, | |
1052 | int vf, int rate); | |
5f8444a3 GR |
1053 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1054 | int vf, bool setting); | |
95c26df8 WM |
1055 | int (*ndo_get_vf_config)(struct net_device *dev, |
1056 | int vf, | |
1057 | struct ifla_vf_info *ivf); | |
1d8faf48 RE |
1058 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
1059 | int vf, int link_state); | |
57b61080 SF |
1060 | int (*ndo_set_vf_port)(struct net_device *dev, |
1061 | int vf, | |
1062 | struct nlattr *port[]); | |
1063 | int (*ndo_get_vf_port)(struct net_device *dev, | |
1064 | int vf, struct sk_buff *skb); | |
4f57c087 | 1065 | int (*ndo_setup_tc)(struct net_device *dev, u8 tc); |
d11ead75 | 1066 | #if IS_ENABLED(CONFIG_FCOE) |
cb454399 YZ |
1067 | int (*ndo_fcoe_enable)(struct net_device *dev); |
1068 | int (*ndo_fcoe_disable)(struct net_device *dev); | |
4d288d57 YZ |
1069 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
1070 | u16 xid, | |
1071 | struct scatterlist *sgl, | |
1072 | unsigned int sgc); | |
1073 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, | |
1074 | u16 xid); | |
6247e086 YZ |
1075 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
1076 | u16 xid, | |
1077 | struct scatterlist *sgl, | |
1078 | unsigned int sgc); | |
68bad94e NP |
1079 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1080 | struct netdev_fcoe_hbainfo *hbainfo); | |
3c9c36bc BPG |
1081 | #endif |
1082 | ||
d11ead75 | 1083 | #if IS_ENABLED(CONFIG_LIBFCOE) |
df5c7945 YZ |
1084 | #define NETDEV_FCOE_WWNN 0 |
1085 | #define NETDEV_FCOE_WWPN 1 | |
1086 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | |
1087 | u64 *wwn, int type); | |
4d288d57 | 1088 | #endif |
3c9c36bc | 1089 | |
c445477d BH |
1090 | #ifdef CONFIG_RFS_ACCEL |
1091 | int (*ndo_rx_flow_steer)(struct net_device *dev, | |
1092 | const struct sk_buff *skb, | |
1093 | u16 rxq_index, | |
1094 | u32 flow_id); | |
1095 | #endif | |
fbaec0ea JP |
1096 | int (*ndo_add_slave)(struct net_device *dev, |
1097 | struct net_device *slave_dev); | |
1098 | int (*ndo_del_slave)(struct net_device *dev, | |
1099 | struct net_device *slave_dev); | |
c8f44aff MM |
1100 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1101 | netdev_features_t features); | |
5455c699 | 1102 | int (*ndo_set_features)(struct net_device *dev, |
c8f44aff | 1103 | netdev_features_t features); |
da6a8fa0 | 1104 | int (*ndo_neigh_construct)(struct neighbour *n); |
447f2191 | 1105 | void (*ndo_neigh_destroy)(struct neighbour *n); |
77162022 JF |
1106 | |
1107 | int (*ndo_fdb_add)(struct ndmsg *ndm, | |
edc7d573 | 1108 | struct nlattr *tb[], |
77162022 | 1109 | struct net_device *dev, |
6b6e2725 | 1110 | const unsigned char *addr, |
77162022 JF |
1111 | u16 flags); |
1112 | int (*ndo_fdb_del)(struct ndmsg *ndm, | |
1690be63 | 1113 | struct nlattr *tb[], |
77162022 | 1114 | struct net_device *dev, |
6b6e2725 | 1115 | const unsigned char *addr); |
77162022 JF |
1116 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1117 | struct netlink_callback *cb, | |
1118 | struct net_device *dev, | |
1119 | int idx); | |
e5a55a89 JF |
1120 | |
1121 | int (*ndo_bridge_setlink)(struct net_device *dev, | |
1122 | struct nlmsghdr *nlh); | |
1123 | int (*ndo_bridge_getlink)(struct sk_buff *skb, | |
1124 | u32 pid, u32 seq, | |
6cbdceeb VY |
1125 | struct net_device *dev, |
1126 | u32 filter_mask); | |
407af329 VY |
1127 | int (*ndo_bridge_dellink)(struct net_device *dev, |
1128 | struct nlmsghdr *nlh); | |
4bf84c35 JP |
1129 | int (*ndo_change_carrier)(struct net_device *dev, |
1130 | bool new_carrier); | |
66b52b0d JP |
1131 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
1132 | struct netdev_phys_port_id *ppid); | |
53cf5275 JG |
1133 | void (*ndo_add_vxlan_port)(struct net_device *dev, |
1134 | sa_family_t sa_family, | |
35e42379 | 1135 | __be16 port); |
53cf5275 JG |
1136 | void (*ndo_del_vxlan_port)(struct net_device *dev, |
1137 | sa_family_t sa_family, | |
35e42379 | 1138 | __be16 port); |
a6cc0cfa JF |
1139 | |
1140 | void* (*ndo_dfwd_add_station)(struct net_device *pdev, | |
1141 | struct net_device *dev); | |
1142 | void (*ndo_dfwd_del_station)(struct net_device *pdev, | |
1143 | void *priv); | |
1144 | ||
1145 | netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, | |
1146 | struct net_device *dev, | |
1147 | void *priv); | |
d314774c SH |
1148 | }; |
1149 | ||
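Tying the hooks documented above together, a hedged sketch of a minimal ops table for the hypothetical driver used in the earlier sketches; example_start_xmit is hypothetical, while the eth_* helpers are the stock ones from <linux/etherdevice.h>:

static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,		/* NAPI open/stop sketch above */
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,	/* hypothetical, returns netdev_tx_t */
	.ndo_set_rx_mode	= example_set_rx_mode,	/* multicast sketch above */
	.ndo_select_queue	= example_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

/* installed at probe time, before register_netdev():
 *	dev->netdev_ops = &example_netdev_ops;
 */
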
1da177e4 LT |
1150 | /* |
1151 | * The DEVICE structure. | |
1152 | * Actually, this whole structure is a big mistake. It mixes I/O | |
1153 | * data with strictly "high-level" data, and it has to know about | |
1154 | * almost every data structure used in the INET module. | |
1155 | * | |
1156 | * FIXME: cleanup struct net_device such that network protocol info | |
1157 | * moves out. | |
1158 | */ | |
1159 | ||
d94d9fee | 1160 | struct net_device { |
1da177e4 LT |
1161 | |
1162 | /* | |
1163 | * This is the first field of the "visible" part of this structure | |
1164 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
724df615 | 1165 | * of the interface. |
1da177e4 LT |
1166 | */ |
1167 | char name[IFNAMSIZ]; | |
ed77134b | 1168 | |
9136461a | 1169 | /* device name hash chain, please keep it close to name[] */ |
9356b8fc | 1170 | struct hlist_node name_hlist; |
9136461a | 1171 | |
0b815a1a SH |
1172 | /* snmp alias */ |
1173 | char *ifalias; | |
1da177e4 LT |
1174 | |
1175 | /* | |
1176 | * I/O specific fields | |
1177 | * FIXME: Merge these and struct ifmap into one | |
1178 | */ | |
1179 | unsigned long mem_end; /* shared mem end */ | |
1180 | unsigned long mem_start; /* shared mem start */ | |
1181 | unsigned long base_addr; /* device I/O address */ | |
df42153c | 1182 | int irq; /* device IRQ number */ |
1da177e4 LT |
1183 | |
1184 | /* | |
1185 | * Some hardware also needs these fields, but they are not | |
1186 | * part of the usual set specified in Space.c. | |
1187 | */ | |
1188 | ||
1da177e4 LT |
1189 | unsigned long state; |
1190 | ||
7562f876 | 1191 | struct list_head dev_list; |
bea3348e | 1192 | struct list_head napi_list; |
44a0873d | 1193 | struct list_head unreg_list; |
5cde2829 | 1194 | struct list_head close_list; |
2f268f12 VF |
1195 | |
1196 | /* directly linked devices, like slaves for bonding */ | |
1197 | struct { | |
1198 | struct list_head upper; | |
1199 | struct list_head lower; | |
1200 | } adj_list; | |
1201 | ||
1202 | /* all linked devices, *including* neighbours */ | |
1203 | struct { | |
1204 | struct list_head upper; | |
1205 | struct list_head lower; | |
1206 | } all_adj_list; | |
4c3d5e7b | 1207 | |
1da177e4 | 1208 | |
5455c699 | 1209 | /* currently active device features */ |
c8f44aff | 1210 | netdev_features_t features; |
5455c699 | 1211 | /* user-changeable features */ |
c8f44aff | 1212 | netdev_features_t hw_features; |
5455c699 | 1213 | /* user-requested features */ |
c8f44aff | 1214 | netdev_features_t wanted_features; |
1aac6267 | 1215 | /* mask of features inheritable by VLAN devices */ |
c8f44aff | 1216 | netdev_features_t vlan_features; |
6a674e9c JG |
1217 | /* mask of features inherited by encapsulating devices |
1218 | * This field indicates what encapsulation offloads | |
1219 | * the hardware is capable of doing, and drivers will | |
1220 | * need to set them appropriately. | |
1221 | */ | |
1222 | netdev_features_t hw_enc_features; | |
0d89d203 SH |
1223 | /* mask of features inheritable by MPLS */ |
1224 | netdev_features_t mpls_features; | |
04ed3e74 | 1225 | |
1da177e4 LT |
1226 | /* Interface index. Unique device identifier */ |
1227 | int ifindex; | |
1228 | int iflink; | |
1229 | ||
c45d286e | 1230 | struct net_device_stats stats; |
caf586e5 ED |
1231 | atomic_long_t rx_dropped; /* dropped packets by core network |
1232 | * Do not use this in drivers. | |
1233 | */ | |
1da177e4 | 1234 | |
b86e0280 | 1235 | #ifdef CONFIG_WIRELESS_EXT |
1da177e4 LT |
1236 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
1237 | * See <net/iw_handler.h> for details. Jean II */ | |
1238 | const struct iw_handler_def * wireless_handlers; | |
1239 | /* Instance data managed by the core of Wireless Extensions. */ | |
1240 | struct iw_public_data * wireless_data; | |
b86e0280 | 1241 | #endif |
d314774c SH |
1242 | /* Management operations */ |
1243 | const struct net_device_ops *netdev_ops; | |
76fd8593 | 1244 | const struct ethtool_ops *ethtool_ops; |
a6cc0cfa | 1245 | const struct forwarding_accel_ops *fwd_ops; |
1da177e4 | 1246 | |
3b04ddde SH |
1247 | /* Hardware header description */ |
1248 | const struct header_ops *header_ops; | |
1249 | ||
b00055aa | 1250 | unsigned int flags; /* interface flags (a la BSD) */ |
3bdc0eba BG |
1251 | unsigned int priv_flags; /* Like 'flags' but invisible to userspace. |
1252 | * See if.h for definitions. */ | |
1da177e4 | 1253 | unsigned short gflags; |
1da177e4 LT |
1254 | unsigned short padded; /* How much padding added by alloc_netdev() */ |
1255 | ||
b00055aa SR |
1256 | unsigned char operstate; /* RFC2863 operstate */ |
1257 | unsigned char link_mode; /* mapping policy to operstate */ | |
1258 | ||
bdc220da JP |
1259 | unsigned char if_port; /* Selectable AUI, TP,..*/ |
1260 | unsigned char dma; /* DMA channel */ | |
1261 | ||
cd7b5396 | 1262 | unsigned int mtu; /* interface MTU value */ |
1da177e4 LT |
1263 | unsigned short type; /* interface hardware type */ |
1264 | unsigned short hard_header_len; /* hardware hdr length */ | |
1da177e4 | 1265 | |
f5184d26 JB |
1266 | /* extra head- and tailroom the hardware may need, but not in all cases |
1267 | * can this be guaranteed, especially tailroom. Some cases also use | |
1268 | * LL_MAX_HEADER instead to allocate the skb. | |
1269 | */ | |
1270 | unsigned short needed_headroom; | |
1271 | unsigned short needed_tailroom; | |
1272 | ||
1da177e4 | 1273 | /* Interface address info. */ |
a6f9a705 | 1274 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
c1f79426 | 1275 | unsigned char addr_assign_type; /* hw address assignment type */ |
1da177e4 | 1276 | unsigned char addr_len; /* hardware address length */ |
a0a9663d | 1277 | unsigned short neigh_priv_len; |
dffebd2c N |
1278 | unsigned short dev_id; /* Used to differentiate devices |
1279 | * that share the same link | |
1280 | * layer address | |
1281 | */ | |
ccffad25 | 1282 | spinlock_t addr_list_lock; |
22bedad3 JP |
1283 | struct netdev_hw_addr_list uc; /* Unicast mac addresses */ |
1284 | struct netdev_hw_addr_list mc; /* Multicast mac addresses */ | |
4c3d5e7b ED |
1285 | struct netdev_hw_addr_list dev_addrs; /* list of device |
1286 | * hw addresses | |
1287 | */ | |
1288 | #ifdef CONFIG_SYSFS | |
1289 | struct kset *queues_kset; | |
1290 | #endif | |
1291 | ||
2d348d1f | 1292 | bool uc_promisc; |
9d45abe1 WC |
1293 | unsigned int promiscuity; |
1294 | unsigned int allmulti; | |
1da177e4 | 1295 | |
1da177e4 LT |
1296 | |
1297 | /* Protocol specific pointers */ | |
65ac6a5f | 1298 | |
d11ead75 | 1299 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
5b9ea6e0 | 1300 | struct vlan_info __rcu *vlan_info; /* VLAN info */ |
65ac6a5f | 1301 | #endif |
34a430d7 | 1302 | #if IS_ENABLED(CONFIG_NET_DSA) |
cf50dcc2 | 1303 | struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ |
37cb0620 YX |
1304 | #endif |
1305 | #if IS_ENABLED(CONFIG_TIPC) | |
1306 | struct tipc_bearer __rcu *tipc_ptr; /* TIPC specific data */ | |
91da11f8 | 1307 | #endif |
1da177e4 | 1308 | void *atalk_ptr; /* AppleTalk link */ |
95ae6b22 | 1309 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ |
fc766e4c | 1310 | struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ |
198caeca | 1311 | struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ |
1da177e4 | 1312 | void *ax25_ptr; /* AX.25 specific data */ |
704232c2 JB |
1313 | struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, |
1314 | assign before registering */ | |
1da177e4 | 1315 | |
9356b8fc | 1316 | /* |
cd13539b | 1317 | * Cache lines mostly used on receive path (including eth_type_trans()) |
9356b8fc | 1318 | */ |
4dc89133 ED |
1319 | unsigned long last_rx; /* Time of last Rx |
1320 | * This should not be set in | |
1321 | * drivers, unless really needed, | |
1322 | * because the network stack (bonding)
1323 | * uses it if/when necessary, to
1324 | * avoid dirtying this cache line. | |
1325 | */ | |
1326 | ||
9356b8fc | 1327 | /* Interface address info used in eth_type_trans() */ |
f001fde5 JP |
1328 | unsigned char *dev_addr; /* hw address, (before bcast |
1329 | because most packets are | |
1330 | unicast) */ | |
1331 | ||
0a9627f2 | 1332 | |
a953be53 | 1333 | #ifdef CONFIG_SYSFS |
0a9627f2 TH |
1334 | struct netdev_rx_queue *_rx; |
1335 | ||
62fe0b40 | 1336 | /* Number of RX queues allocated at register_netdev() time */ |
0a9627f2 | 1337 | unsigned int num_rx_queues; |
62fe0b40 BH |
1338 | |
1339 | /* Number of RX queues currently active in device */ | |
1340 | unsigned int real_num_rx_queues; | |
c445477d | 1341 | |
df334545 | 1342 | #endif |
0a9627f2 | 1343 | |
61391cde | 1344 | rx_handler_func_t __rcu *rx_handler; |
1345 | void __rcu *rx_handler_data; | |
e8a0464c | 1346 | |
24824a09 | 1347 | struct netdev_queue __rcu *ingress_queue; |
4c3d5e7b ED |
1348 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ |
1349 | ||
cd13539b ED |
1350 | |
1351 | /* | |
1352 | * Cache lines mostly used on transmit path | |
1353 | */ | |
e8a0464c | 1354 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
fd2ea0a7 DM |
1355 | |
1356 | /* Number of TX queues allocated at alloc_netdev_mq() time */ | |
e8a0464c | 1357 | unsigned int num_tx_queues; |
fd2ea0a7 DM |
1358 | |
1359 | /* Number of TX queues currently active in device */ | |
1360 | unsigned int real_num_tx_queues; | |
1361 | ||
af356afa PM |
1362 | /* root qdisc from userspace point of view */ |
1363 | struct Qdisc *qdisc; | |
1364 | ||
1da177e4 | 1365 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
c3f26a26 | 1366 | spinlock_t tx_global_lock; |
cd13539b | 1367 | |
bf264145 | 1368 | #ifdef CONFIG_XPS |
a4177869 | 1369 | struct xps_dev_maps __rcu *xps_maps; |
bf264145 | 1370 | #endif |
4c3d5e7b ED |
1371 | #ifdef CONFIG_RFS_ACCEL |
1372 | /* CPU reverse-mapping for RX completion interrupts, indexed | |
1373 | * by RX queue number. Assigned by driver. This must only be | |
1374 | * set if the ndo_rx_flow_steer operation is defined. */ | |
1375 | struct cpu_rmap *rx_cpu_rmap; | |
1376 | #endif | |
1d24eb48 | 1377 | |
9356b8fc | 1378 | /* These may be needed for future network-power-down code. */ |
9d21493b ED |
1379 | |
1380 | /* | |
1381 | * trans_start here is expensive for high speed devices on SMP, | |
1382 | * please use netdev_queue->trans_start instead. | |
1383 | */ | |
9356b8fc ED |
1384 | unsigned long trans_start; /* Time (in jiffies) of last Tx */ |
1385 | ||
1386 | int watchdog_timeo; /* used by dev_watchdog() */ | |
1387 | struct timer_list watchdog_timer; | |
1388 | ||
1da177e4 | 1389 | /* Number of references to this device */ |
29b4433d | 1390 | int __percpu *pcpu_refcnt; |
9356b8fc | 1391 | |
1da177e4 LT |
1392 | /* delayed register/unregister */ |
1393 | struct list_head todo_list; | |
1da177e4 LT |
1394 | /* device index hash chain */ |
1395 | struct hlist_node index_hlist; | |
1396 | ||
e014debe | 1397 | struct list_head link_watch_list; |
572a103d | 1398 | |
1da177e4 LT |
1399 | /* register/unregister state machine */ |
1400 | enum { NETREG_UNINITIALIZED=0, | |
b17a7c17 | 1401 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
1402 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
1403 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
1404 | NETREG_RELEASED, /* called free_netdev */ | |
937f1ba5 | 1405 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
449f4544 ED |
1406 | } reg_state:8; |
1407 | ||
1408 | bool dismantle; /* device is going to be freed */
a2835763 PM |
1409 | |
1410 | enum { | |
1411 | RTNL_LINK_INITIALIZED, | |
1412 | RTNL_LINK_INITIALIZING, | |
1413 | } rtnl_link_state:16; | |
1da177e4 | 1414 | |
d314774c SH |
1415 | /* Called from unregister, can be used to call free_netdev */ |
1416 | void (*destructor)(struct net_device *dev); | |
1da177e4 | 1417 | |
1da177e4 | 1418 | #ifdef CONFIG_NETPOLL |
5fbee843 | 1419 | struct netpoll_info __rcu *npinfo; |
1da177e4 | 1420 | #endif |
eae792b7 | 1421 | |
c346dca1 | 1422 | #ifdef CONFIG_NET_NS |
4a1c5371 EB |
1423 | /* Network namespace this network device is inside */ |
1424 | struct net *nd_net; | |
c346dca1 | 1425 | #endif |
4a1c5371 | 1426 | |
4951704b | 1427 | /* mid-layer private */ |
a7855c78 ED |
1428 | union { |
1429 | void *ml_priv; | |
1430 | struct pcpu_lstats __percpu *lstats; /* loopback stats */ | |
8f84985f | 1431 | struct pcpu_sw_netstats __percpu *tstats; |
6d81f41c | 1432 | struct pcpu_dstats __percpu *dstats; /* dummy stats */ |
2681128f | 1433 | struct pcpu_vstats __percpu *vstats; /* veth stats */ |
a7855c78 | 1434 | }; |
eca9ebac | 1435 | /* GARP */ |
3cc77ec7 | 1436 | struct garp_port __rcu *garp_port; |
febf018d DW |
1437 | /* MRP */ |
1438 | struct mrp_port __rcu *mrp_port; | |
1da177e4 | 1439 | |
1da177e4 | 1440 | /* class/net/name entry */ |
43cb76d9 | 1441 | struct device dev; |
0c509a6c EB |
1442 | /* space for optional device, statistics, and wireless sysfs groups */ |
1443 | const struct attribute_group *sysfs_groups[4]; | |
a953be53 MD |
1444 | /* space for optional per-rx queue attributes */ |
1445 | const struct attribute_group *sysfs_rx_queue_group; | |
38f7b870 PM |
1446 | |
1447 | /* rtnetlink link ops */ | |
1448 | const struct rtnl_link_ops *rtnl_link_ops; | |
f25f4e44 | 1449 | |
82cc1a7a PWJ |
1450 | /* for setting kernel sock attribute on TCP connection setup */ |
1451 | #define GSO_MAX_SIZE 65536 | |
1452 | unsigned int gso_max_size; | |
30b678d8 BH |
1453 | #define GSO_MAX_SEGS 65535 |
1454 | u16 gso_max_segs; | |
d314774c | 1455 | |
7a6b6f51 | 1456 | #ifdef CONFIG_DCB |
2f90b865 | 1457 | /* Data Center Bridging netlink ops */ |
32953543 | 1458 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2f90b865 | 1459 | #endif |
4f57c087 JF |
1460 | u8 num_tc; |
1461 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | |
1462 | u8 prio_tc_map[TC_BITMASK + 1]; | |
2f90b865 | 1463 | |
d11ead75 | 1464 | #if IS_ENABLED(CONFIG_FCOE) |
4d288d57 YZ |
1465 | /* max exchange id for FCoE LRO by ddp */ |
1466 | unsigned int fcoe_ddp_xid; | |
5bc1421e | 1467 | #endif |
86f8515f | 1468 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
5bc1421e | 1469 | struct netprio_map __rcu *priomap; |
4d288d57 | 1470 | #endif |
c1f19b51 RC |
1471 | /* phy device may attach itself for hardware timestamping */ |
1472 | struct phy_device *phydev; | |
cbda10fa | 1473 | |
23d3b8bf ED |
1474 | struct lock_class_key *qdisc_tx_busylock; |
1475 | ||
cbda10fa VD |
1476 | /* group the device belongs to */ |
1477 | int group; | |
9136461a ED |
1478 | |
1479 | struct pm_qos_request pm_qos_req; | |
1da177e4 | 1480 | }; |
43cb76d9 | 1481 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 LT |
1482 | |
1483 | #define NETDEV_ALIGN 32 | |
1da177e4 | 1484 | |
4f57c087 JF |
1485 | static inline |
1486 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | |
1487 | { | |
1488 | return dev->prio_tc_map[prio & TC_BITMASK]; | |
1489 | } | |
1490 | ||
1491 | static inline | |
1492 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |
1493 | { | |
1494 | if (tc >= dev->num_tc) | |
1495 | return -EINVAL; | |
1496 | ||
1497 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | |
1498 | return 0; | |
1499 | } | |
1500 | ||
1501 | static inline | |
1502 | void netdev_reset_tc(struct net_device *dev) | |
1503 | { | |
1504 | dev->num_tc = 0; | |
1505 | memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | |
1506 | memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | |
1507 | } | |
1508 | ||
1509 | static inline | |
1510 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | |
1511 | { | |
1512 | if (tc >= dev->num_tc) | |
1513 | return -EINVAL; | |
1514 | ||
1515 | dev->tc_to_txq[tc].count = count; | |
1516 | dev->tc_to_txq[tc].offset = offset; | |
1517 | return 0; | |
1518 | } | |
1519 | ||
1520 | static inline | |
1521 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | |
1522 | { | |
1523 | if (num_tc > TC_MAX_QUEUE) | |
1524 | return -EINVAL; | |
1525 | ||
1526 | dev->num_tc = num_tc; | |
1527 | return 0; | |
1528 | } | |
1529 | ||
1530 | static inline | |
1531 | int netdev_get_num_tc(struct net_device *dev) | |
1532 | { | |
1533 | return dev->num_tc; | |
1534 | } | |
1535 | ||
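
/* Illustrative sketch only (the foo_setup_tc name and the queue layout are
 * made up): a driver with eight TX queues could carve them into two traffic
 * classes with the helpers above, four queues per class, and then map skb
 * priorities onto those classes.
 *
 *	static void foo_setup_tc(struct net_device *dev)
 *	{
 *		netdev_set_num_tc(dev, 2);
 *		netdev_set_tc_queue(dev, 0, 4, 0);	(TC0: queues 0-3)
 *		netdev_set_tc_queue(dev, 1, 4, 4);	(TC1: queues 4-7)
 *		netdev_set_prio_tc_map(dev, 0, 0);
 *		netdev_set_prio_tc_map(dev, 7, 1);
 *	}
 */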
e8a0464c DM |
1536 | static inline |
1537 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | |
1538 | unsigned int index) | |
1539 | { | |
1540 | return &dev->_tx[index]; | |
1541 | } | |
1542 | ||
1543 | static inline void netdev_for_each_tx_queue(struct net_device *dev, | |
1544 | void (*f)(struct net_device *, | |
1545 | struct netdev_queue *, | |
1546 | void *), | |
1547 | void *arg) | |
1548 | { | |
1549 | unsigned int i; | |
1550 | ||
1551 | for (i = 0; i < dev->num_tx_queues; i++) | |
1552 | f(dev, &dev->_tx[i], arg); | |
1553 | } | |
1554 | ||
f629d208 | 1555 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
f663dd9a JW |
1556 | struct sk_buff *skb, |
1557 | void *accel_priv); | |
8c4c49df | 1558 | |
c346dca1 YH |
1559 | /* |
1560 | * Net namespace inlines | |
1561 | */ | |
1562 | static inline | |
1563 | struct net *dev_net(const struct net_device *dev) | |
1564 | { | |
c2d9ba9b | 1565 | return read_pnet(&dev->nd_net); |
c346dca1 YH |
1566 | } |
1567 | ||
1568 | static inline | |
f5aa23fd | 1569 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 YH |
1570 | { |
1571 | #ifdef CONFIG_NET_NS | |
f3005d7f DL |
1572 | release_net(dev->nd_net); |
1573 | dev->nd_net = hold_net(net); | |
c346dca1 YH |
1574 | #endif |
1575 | } | |
1576 | ||
cf85d08f LB |
1577 | static inline bool netdev_uses_dsa_tags(struct net_device *dev) |
1578 | { | |
1579 | #ifdef CONFIG_NET_DSA_TAG_DSA | |
1580 | if (dev->dsa_ptr != NULL) | |
1581 | return dsa_uses_dsa_tags(dev->dsa_ptr); | |
1582 | #endif | |
1583 | ||
1584 | return 0; | |
1585 | } | |
1586 | ||
396138f0 LB |
1587 | static inline bool netdev_uses_trailer_tags(struct net_device *dev) |
1588 | { | |
1589 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | |
1590 | if (dev->dsa_ptr != NULL) | |
1591 | return dsa_uses_trailer_tags(dev->dsa_ptr); | |
1592 | #endif | |
1593 | ||
1594 | return 0; | |
1595 | } | |
1596 | ||
bea3348e SH |
1597 | /** |
1598 | * netdev_priv - access network device private data | |
1599 | * @dev: network device | |
1600 | * | |
1601 | * Get network device private data | |
1602 | */ | |
6472ce60 | 1603 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 1604 | { |
1ce8e7b5 | 1605 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
1da177e4 LT |
1606 | } |
1607 | ||
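
/* Minimal usage sketch (not from any in-tree driver; "foo_priv" is a
 * placeholder): the private area lives right behind the net_device, so a
 * driver that passed its size to alloc_etherdev() reaches it via
 * netdev_priv().
 *
 *	struct foo_priv {
 *		spinlock_t lock;
 *	};
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv;
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	spin_lock_init(&priv->lock);
 */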
1da177e4 LT |
1608 | /* Set the sysfs physical device reference for the network logical device;
1609 | * if set prior to registration, a symlink will be created during initialization.
1610 | */ | |
43cb76d9 | 1611 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 1612 | |
384912ed | 1613 | /* Set the sysfs device type for the network logical device to allow |
3f79410c | 1614 | * fine-grained identification of different network device types. For |
384912ed MH |
1615 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1616 | */ | |
1617 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | |
1618 | ||
82dc3c63 ED |
1619 | /* Default NAPI poll() weight |
1620 | * Device drivers are strongly advised not to use a bigger value
1621 | */ | |
1622 | #define NAPI_POLL_WEIGHT 64 | |
1623 | ||
3b582cc1 SH |
1624 | /** |
1625 | * netif_napi_add - initialize a napi context | |
1626 | * @dev: network device | |
1627 | * @napi: napi context | |
1628 | * @poll: polling function | |
1629 | * @weight: default weight | |
1630 | * | |
1631 | * netif_napi_add() must be used to initialize a napi context prior to calling | |
1632 | * *any* of the other napi related functions. | |
1633 | */ | |
d565b0a1 HX |
1634 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
1635 | int (*poll)(struct napi_struct *, int), int weight); | |
bea3348e | 1636 | |
d8156534 AD |
1637 | /** |
1638 | * netif_napi_del - remove a napi context | |
1639 | * @napi: napi context | |
1640 | * | |
1641 | * netif_napi_del() removes a napi context from the network device napi list | |
1642 | */ | |
d565b0a1 HX |
1643 | void netif_napi_del(struct napi_struct *napi); |
1644 | ||
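/* Usage sketch (hypothetical driver; foo_clean_rx_ring()/foo_enable_rx_irq()
 * are placeholders): the poll method processes at most @budget packets and
 * calls napi_complete() once the ring is drained, while the context itself is
 * registered at probe time and removed with netif_napi_del() on teardown.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = foo_clean_rx_ring(napi, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq();
 *		}
 *		return done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */
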
1645 | struct napi_gro_cb { | |
78a478d0 HX |
1646 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
1647 | void *frag0; | |
1648 | ||
7489594c HX |
1649 | /* Length of frag0. */ |
1650 | unsigned int frag0_len; | |
1651 | ||
86911732 HX |
1652 | /* This indicates where we are processing relative to skb->data. */ |
1653 | int data_offset; | |
1654 | ||
d565b0a1 | 1655 | /* This is non-zero if the packet cannot be merged with the new skb. */ |
bf5a755f JC |
1656 | u16 flush; |
1657 | ||
1658 | /* Save the IP ID here and check when we get to the transport layer */ | |
1659 | u16 flush_id; | |
d565b0a1 HX |
1660 | |
1661 | /* Number of segments aggregated. */ | |
2e71a6f8 ED |
1662 | u16 count; |
1663 | ||
1664 | /* This is non-zero if the packet may be of the same flow. */ | |
1665 | u8 same_flow; | |
5d38a079 HX |
1666 | |
1667 | /* Free the skb? */ | |
2e71a6f8 | 1668 | u8 free; |
d7e8883c ED |
1669 | #define NAPI_GRO_FREE 1 |
1670 | #define NAPI_GRO_FREE_STOLEN_HEAD 2 | |
2e71a6f8 ED |
1671 | |
1672 | /* jiffies when first packet was created/queued */ | |
1673 | unsigned long age; | |
86347245 ED |
1674 | |
1675 | /* Used in ipv6_gro_receive() */ | |
b582ef09 OG |
1676 | u16 proto; |
1677 | ||
1678 | /* Used in udp_gro_receive */ | |
1679 | u16 udp_mark; | |
c3c7c254 | 1680 | |
bf5a755f JC |
1681 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
1682 | __wsum csum; | |
1683 | ||
c3c7c254 ED |
1684 | /* used in skb_gro_receive() slow path */ |
1685 | struct sk_buff *last; | |
d565b0a1 HX |
1686 | }; |
1687 | ||
1688 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | |
d8156534 | 1689 | |
1da177e4 | 1690 | struct packet_type { |
f2ccd8fa DM |
1691 | __be16 type; /* This is really htons(ether_type). */ |
1692 | struct net_device *dev; /* NULL is wildcarded here */ | |
1693 | int (*func) (struct sk_buff *, | |
1694 | struct net_device *, | |
1695 | struct packet_type *, | |
1696 | struct net_device *); | |
c0de08d0 EL |
1697 | bool (*id_match)(struct packet_type *ptype, |
1698 | struct sock *sk); | |
1da177e4 LT |
1699 | void *af_packet_priv; |
1700 | struct list_head list; | |
1701 | }; | |
1702 | ||
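/* Registration sketch (illustrative; foo_rcv is a placeholder, ETH_P_ALL
 * taps every ethertype): a protocol handler fills in a struct packet_type
 * and hooks it with dev_add_pack(); dev_remove_pack() detaches it again.
 *
 *	static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ALL),
 *		.func	= foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */
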
f191a1d1 | 1703 | struct offload_callbacks { |
576a30eb | 1704 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
c8f44aff | 1705 | netdev_features_t features); |
a430a43d | 1706 | int (*gso_send_check)(struct sk_buff *skb); |
d565b0a1 HX |
1707 | struct sk_buff **(*gro_receive)(struct sk_buff **head, |
1708 | struct sk_buff *skb); | |
299603e8 | 1709 | int (*gro_complete)(struct sk_buff *skb, int nhoff); |
f191a1d1 VY |
1710 | }; |
1711 | ||
1712 | struct packet_offload { | |
1713 | __be16 type; /* This is really htons(ether_type). */ | |
1714 | struct offload_callbacks callbacks; | |
1715 | struct list_head list; | |
1da177e4 LT |
1716 | }; |
1717 | ||
b582ef09 OG |
1718 | struct udp_offload { |
1719 | __be16 port; | |
1720 | struct offload_callbacks callbacks; | |
1721 | }; | |
1722 | ||
8f84985f LR |
1723 | /* often modified stats are per cpu, other are shared (netdev->stats) */ |
1724 | struct pcpu_sw_netstats { | |
1725 | u64 rx_packets; | |
1726 | u64 rx_bytes; | |
1727 | u64 tx_packets; | |
1728 | u64 tx_bytes; | |
1729 | struct u64_stats_sync syncp; | |
1730 | }; | |
1731 | ||
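/* Sketch of typical driver-side handling (assumed code, not an API defined
 * here): the counters are allocated per cpu and updated inside a
 * u64_stats_sync section so 64-bit reads stay consistent on 32-bit hosts.
 *
 *	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
 *
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	tstats->rx_packets++;
 *	tstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&tstats->syncp);
 */
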
1da177e4 LT |
1732 | #include <linux/notifier.h> |
1733 | ||
dcfe1421 AW |
1734 | /* netdevice notifier chain. Please remember to update the rtnetlink |
1735 | * notification exclusion list in rtnetlink_event() when adding new | |
1736 | * types. | |
1737 | */ | |
1738 | #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ | |
1739 | #define NETDEV_DOWN 0x0002 | |
1740 | #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface | |
1741 | detected a hardware crash and restarted | |
1742 | - we can use this e.g. to kick tcp sessions
1743 | once done */ | |
1744 | #define NETDEV_CHANGE 0x0004 /* Notify device state change */ | |
1745 | #define NETDEV_REGISTER 0x0005 | |
1746 | #define NETDEV_UNREGISTER 0x0006 | |
1d486bfb | 1747 | #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */ |
dcfe1421 AW |
1748 | #define NETDEV_CHANGEADDR 0x0008 |
1749 | #define NETDEV_GOING_DOWN 0x0009 | |
1750 | #define NETDEV_CHANGENAME 0x000A | |
1751 | #define NETDEV_FEAT_CHANGE 0x000B | |
1752 | #define NETDEV_BONDING_FAILOVER 0x000C | |
1753 | #define NETDEV_PRE_UP 0x000D | |
1754 | #define NETDEV_PRE_TYPE_CHANGE 0x000E | |
1755 | #define NETDEV_POST_TYPE_CHANGE 0x000F | |
1756 | #define NETDEV_POST_INIT 0x0010 | |
0115e8e3 | 1757 | #define NETDEV_UNREGISTER_FINAL 0x0011 |
dcfe1421 AW |
1758 | #define NETDEV_RELEASE 0x0012 |
1759 | #define NETDEV_NOTIFY_PEERS 0x0013 | |
1760 | #define NETDEV_JOIN 0x0014 | |
42e52bf9 | 1761 | #define NETDEV_CHANGEUPPER 0x0015 |
4aa5dee4 | 1762 | #define NETDEV_RESEND_IGMP 0x0016 |
1d486bfb | 1763 | #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ |
dcfe1421 | 1764 | |
f629d208 JP |
1765 | int register_netdevice_notifier(struct notifier_block *nb); |
1766 | int unregister_netdevice_notifier(struct notifier_block *nb); | |
351638e7 JP |
1767 | |
1768 | struct netdev_notifier_info { | |
1769 | struct net_device *dev; | |
1770 | }; | |
1771 | ||
be9efd36 JP |
1772 | struct netdev_notifier_change_info { |
1773 | struct netdev_notifier_info info; /* must be first */ | |
1774 | unsigned int flags_changed; | |
1775 | }; | |
1776 | ||
75538c2b CW |
1777 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
1778 | struct net_device *dev) | |
1779 | { | |
1780 | info->dev = dev; | |
1781 | } | |
1782 | ||
351638e7 JP |
1783 | static inline struct net_device * |
1784 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | |
1785 | { | |
1786 | return info->dev; | |
1787 | } | |
1788 | ||
f629d208 | 1789 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
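
/* Sketch of a notifier user (the foo_* names are placeholders): the callback
 * receives one of the NETDEV_* events above and recovers the net_device with
 * netdev_notifier_info_to_dev().
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			foo_refresh_state(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_netdev_notifier);
 */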
dcfe1421 AW |
1790 | |
1791 | ||
1da177e4 LT |
1792 | extern rwlock_t dev_base_lock; /* Device list lock */ |
1793 | ||
881d966b EB |
1794 | #define for_each_netdev(net, d) \ |
1795 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
dcbccbd4 EB |
1796 | #define for_each_netdev_reverse(net, d) \ |
1797 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | |
c6d14c84 ED |
1798 | #define for_each_netdev_rcu(net, d) \ |
1799 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b EB |
1800 | #define for_each_netdev_safe(net, d, n) \ |
1801 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
1802 | #define for_each_netdev_continue(net, d) \ | |
1803 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
254245d2 | 1804 | #define for_each_netdev_continue_rcu(net, d) \ |
1805 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | |
8a7fbfab | 1806 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
1807 | for_each_netdev_rcu(&init_net, slave) \ | |
1808 | if (netdev_master_upper_dev_get_rcu(slave) == bond) | |
881d966b | 1809 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
7562f876 | 1810 | |
a050c33f DL |
1811 | static inline struct net_device *next_net_device(struct net_device *dev) |
1812 | { | |
1813 | struct list_head *lh; | |
1814 | struct net *net; | |
1815 | ||
c346dca1 | 1816 | net = dev_net(dev); |
a050c33f DL |
1817 | lh = dev->dev_list.next; |
1818 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
1819 | } | |
1820 | ||
ce81b76a ED |
1821 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
1822 | { | |
1823 | struct list_head *lh; | |
1824 | struct net *net; | |
1825 | ||
1826 | net = dev_net(dev); | |
ccf43438 | 1827 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
ce81b76a ED |
1828 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
1829 | } | |
1830 | ||
a050c33f DL |
1831 | static inline struct net_device *first_net_device(struct net *net) |
1832 | { | |
1833 | return list_empty(&net->dev_base_head) ? NULL : | |
1834 | net_device_entry(net->dev_base_head.next); | |
1835 | } | |
7562f876 | 1836 | |
ccf43438 ED |
1837 | static inline struct net_device *first_net_device_rcu(struct net *net) |
1838 | { | |
1839 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | |
1840 | ||
1841 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
1842 | } | |
1843 | ||
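/* Iteration sketch: walking a namespace's device list needs either the RTNL
 * (for_each_netdev) or an RCU read-side section (for_each_netdev_rcu);
 * init_net is used here purely as an example namespace.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 */
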
f629d208 JP |
1844 | int netdev_boot_setup_check(struct net_device *dev); |
1845 | unsigned long netdev_boot_base(const char *prefix, int unit); | |
1846 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | |
1847 | const char *hwaddr); | |
1848 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
1849 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
1850 | void dev_add_pack(struct packet_type *pt); | |
1851 | void dev_remove_pack(struct packet_type *pt); | |
1852 | void __dev_remove_pack(struct packet_type *pt); | |
1853 | void dev_add_offload(struct packet_offload *po); | |
1854 | void dev_remove_offload(struct packet_offload *po); | |
f629d208 JP |
1855 | |
1856 | struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, | |
1857 | unsigned short mask); | |
1858 | struct net_device *dev_get_by_name(struct net *net, const char *name); | |
1859 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); | |
1860 | struct net_device *__dev_get_by_name(struct net *net, const char *name); | |
1861 | int dev_alloc_name(struct net_device *dev, const char *name); | |
1862 | int dev_open(struct net_device *dev); | |
1863 | int dev_close(struct net_device *dev); | |
1864 | void dev_disable_lro(struct net_device *dev); | |
1865 | int dev_loopback_xmit(struct sk_buff *newskb); | |
1866 | int dev_queue_xmit(struct sk_buff *skb); | |
f663dd9a | 1867 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); |
f629d208 JP |
1868 | int register_netdevice(struct net_device *dev); |
1869 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | |
1870 | void unregister_netdevice_many(struct list_head *head); | |
44a0873d ED |
1871 | static inline void unregister_netdevice(struct net_device *dev) |
1872 | { | |
1873 | unregister_netdevice_queue(dev, NULL); | |
1874 | } | |
1875 | ||
f629d208 JP |
1876 | int netdev_refcnt_read(const struct net_device *dev); |
1877 | void free_netdev(struct net_device *dev); | |
74d332c1 | 1878 | void netdev_freemem(struct net_device *dev); |
f629d208 JP |
1879 | void synchronize_net(void); |
1880 | int init_dummy_netdev(struct net_device *dev); | |
937f1ba5 | 1881 | |
f629d208 JP |
1882 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
1883 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
1884 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | |
1885 | int netdev_get_name(struct net *net, char *name, int ifindex); | |
1886 | int dev_restart(struct net_device *dev); | |
1da177e4 | 1887 | #ifdef CONFIG_NETPOLL_TRAP |
f629d208 | 1888 | int netpoll_trap(void); |
1da177e4 | 1889 | #endif |
f629d208 | 1890 | int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); |
86911732 HX |
1891 | |
1892 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
1893 | { | |
1894 | return NAPI_GRO_CB(skb)->data_offset; | |
1895 | } | |
1896 | ||
1897 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
1898 | { | |
1899 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
1900 | } | |
1901 | ||
1902 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
1903 | { | |
1904 | NAPI_GRO_CB(skb)->data_offset += len; | |
1905 | } | |
1906 | ||
a5b1cf28 HX |
1907 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
1908 | unsigned int offset) | |
86911732 | 1909 | { |
a5b1cf28 HX |
1910 | return NAPI_GRO_CB(skb)->frag0 + offset; |
1911 | } | |
78a478d0 | 1912 | |
a5b1cf28 HX |
1913 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
1914 | { | |
1915 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
1916 | } | |
78a478d0 | 1917 | |
a5b1cf28 HX |
1918 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
1919 | unsigned int offset) | |
1920 | { | |
17dd759c HX |
1921 | if (!pskb_may_pull(skb, hlen)) |
1922 | return NULL; | |
1923 | ||
a5b1cf28 HX |
1924 | NAPI_GRO_CB(skb)->frag0 = NULL; |
1925 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
17dd759c | 1926 | return skb->data + offset; |
86911732 | 1927 | } |
1da177e4 | 1928 | |
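/* Typical header access pattern in a gro_receive callback (a sketch modelled
 * on the inet/ipv6 handlers; hdr/hlen/off are local variables of the caller):
 * try the frag0 fast path first and fall back to skb_gro_header_slow() when
 * the header is not available there.
 *
 *	off  = skb_gro_offset(skb);
 *	hlen = off + sizeof(*hdr);
 *	hdr  = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		hdr = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!hdr))
 *			goto out;
 *	}
 */
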
aa4b9f53 HX |
1929 | static inline void *skb_gro_mac_header(struct sk_buff *skb) |
1930 | { | |
78d3fd0b | 1931 | return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); |
aa4b9f53 HX |
1932 | } |
1933 | ||
36e7b1b8 HX |
1934 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
1935 | { | |
78d3fd0b HX |
1936 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
1937 | skb_network_offset(skb); | |
36e7b1b8 HX |
1938 | } |
1939 | ||
bf5a755f JC |
1940 | static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, |
1941 | const void *start, unsigned int len) | |
1942 | { | |
1943 | if (skb->ip_summed == CHECKSUM_COMPLETE) | |
1944 | NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, | |
1945 | csum_partial(start, len, 0)); | |
1946 | } | |
1947 | ||
0c4e8581 SH |
1948 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
1949 | unsigned short type, | |
3b04ddde | 1950 | const void *daddr, const void *saddr, |
95c96174 | 1951 | unsigned int len) |
0c4e8581 | 1952 | { |
f1ecfd5d | 1953 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 1954 | return 0; |
3b04ddde SH |
1955 | |
1956 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
1957 | } |
1958 | ||
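/* Usage sketch (dest_hw is a placeholder for the resolved destination MAC):
 * callers that need a link-layer header ask the device to build one; for
 * Ethernet this fills in both MAC addresses and the ethertype.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, NULL, skb->len) < 0)
 *		goto drop;
 */
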
b95cce35 SH |
1959 | static inline int dev_parse_header(const struct sk_buff *skb, |
1960 | unsigned char *haddr) | |
1961 | { | |
1962 | const struct net_device *dev = skb->dev; | |
1963 | ||
1b83336b | 1964 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 1965 | return 0; |
3b04ddde | 1966 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
1967 | } |
1968 | ||
2205369a DM |
1969 | static inline int dev_rebuild_header(struct sk_buff *skb) |
1970 | { | |
1971 | const struct net_device *dev = skb->dev; | |
1972 | ||
1973 | if (!dev->header_ops || !dev->header_ops->rebuild) | |
1974 | return 0; | |
1975 | return dev->header_ops->rebuild(skb); | |
1976 | } | |
1977 | ||
1da177e4 | 1978 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
f629d208 | 1979 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
1da177e4 LT |
1980 | static inline int unregister_gifconf(unsigned int family) |
1981 | { | |
1982 | return register_gifconf(family, NULL); | |
1983 | } | |
1984 | ||
99bbc707 | 1985 | #ifdef CONFIG_NET_FLOW_LIMIT |
5f121b9a | 1986 | #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ |
99bbc707 WB |
1987 | struct sd_flow_limit { |
1988 | u64 count; | |
1989 | unsigned int num_buckets; | |
1990 | unsigned int history_head; | |
1991 | u16 history[FLOW_LIMIT_HISTORY]; | |
1992 | u8 buckets[]; | |
1993 | }; | |
1994 | ||
1995 | extern int netdev_flow_limit_table_len; | |
1996 | #endif /* CONFIG_NET_FLOW_LIMIT */ | |
1997 | ||
1da177e4 | 1998 | /* |
88751275 | 1999 | * Incoming packets are placed on per-cpu queues |
1da177e4 | 2000 | */ |
d94d9fee | 2001 | struct softnet_data { |
37437bb2 | 2002 | struct Qdisc *output_queue; |
a9cbd588 | 2003 | struct Qdisc **output_queue_tailp; |
1da177e4 | 2004 | struct list_head poll_list; |
1da177e4 | 2005 | struct sk_buff *completion_queue; |
6e7676c1 | 2006 | struct sk_buff_head process_queue; |
1da177e4 | 2007 | |
dee42870 | 2008 | /* stats */ |
cd7b5396 DM |
2009 | unsigned int processed; |
2010 | unsigned int time_squeeze; | |
2011 | unsigned int cpu_collision; | |
2012 | unsigned int received_rps; | |
dee42870 | 2013 | |
fd793d89 | 2014 | #ifdef CONFIG_RPS |
88751275 ED |
2015 | struct softnet_data *rps_ipi_list; |
2016 | ||
2017 | /* Elements below can be accessed between CPUs for RPS */ | |
0a9627f2 | 2018 | struct call_single_data csd ____cacheline_aligned_in_smp; |
88751275 ED |
2019 | struct softnet_data *rps_ipi_next; |
2020 | unsigned int cpu; | |
fec5e652 | 2021 | unsigned int input_queue_head; |
76cc8b13 | 2022 | unsigned int input_queue_tail; |
1e94d72f | 2023 | #endif |
95c96174 | 2024 | unsigned int dropped; |
0a9627f2 | 2025 | struct sk_buff_head input_pkt_queue; |
bea3348e | 2026 | struct napi_struct backlog; |
99bbc707 WB |
2027 | |
2028 | #ifdef CONFIG_NET_FLOW_LIMIT | |
5f121b9a | 2029 | struct sd_flow_limit __rcu *flow_limit; |
99bbc707 | 2030 | #endif |
1da177e4 LT |
2031 | }; |
2032 | ||
76cc8b13 | 2033 | static inline void input_queue_head_incr(struct softnet_data *sd) |
fec5e652 TH |
2034 | { |
2035 | #ifdef CONFIG_RPS | |
76cc8b13 TH |
2036 | sd->input_queue_head++; |
2037 | #endif | |
2038 | } | |
2039 | ||
2040 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | |
2041 | unsigned int *qtail) | |
2042 | { | |
2043 | #ifdef CONFIG_RPS | |
2044 | *qtail = ++sd->input_queue_tail; | |
fec5e652 TH |
2045 | #endif |
2046 | } | |
2047 | ||
0a9627f2 | 2048 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1da177e4 | 2049 | |
f629d208 | 2050 | void __netif_schedule(struct Qdisc *q); |
1da177e4 | 2051 | |
86d804e1 | 2052 | static inline void netif_schedule_queue(struct netdev_queue *txq) |
1da177e4 | 2053 | { |
73466498 | 2054 | if (!(txq->state & QUEUE_STATE_ANY_XOFF)) |
37437bb2 | 2055 | __netif_schedule(txq->qdisc); |
86d804e1 DM |
2056 | } |
2057 | ||
fd2ea0a7 DM |
2058 | static inline void netif_tx_schedule_all(struct net_device *dev) |
2059 | { | |
2060 | unsigned int i; | |
2061 | ||
2062 | for (i = 0; i < dev->num_tx_queues; i++) | |
2063 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
2064 | } | |
2065 | ||
d29f749e DJ |
2066 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
2067 | { | |
73466498 | 2068 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2069 | } |
2070 | ||
bea3348e SH |
2071 | /** |
2072 | * netif_start_queue - allow transmit | |
2073 | * @dev: network device | |
2074 | * | |
2075 | * Allow upper layers to call the device hard_start_xmit routine. | |
2076 | */ | |
1da177e4 LT |
2077 | static inline void netif_start_queue(struct net_device *dev) |
2078 | { | |
e8a0464c | 2079 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2080 | } |
2081 | ||
fd2ea0a7 DM |
2082 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
2083 | { | |
2084 | unsigned int i; | |
2085 | ||
2086 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2087 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2088 | netif_tx_start_queue(txq); | |
2089 | } | |
2090 | } | |
2091 | ||
79d16385 | 2092 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
1da177e4 LT |
2093 | { |
2094 | #ifdef CONFIG_NETPOLL_TRAP | |
5f286e11 | 2095 | if (netpoll_trap()) { |
7b3d3e4f | 2096 | netif_tx_start_queue(dev_queue); |
1da177e4 | 2097 | return; |
5f286e11 | 2098 | } |
1da177e4 | 2099 | #endif |
73466498 | 2100 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) |
37437bb2 | 2101 | __netif_schedule(dev_queue->qdisc); |
79d16385 DM |
2102 | } |
2103 | ||
d29f749e DJ |
2104 | /** |
2105 | * netif_wake_queue - restart transmit | |
2106 | * @dev: network device | |
2107 | * | |
2108 | * Allow upper layers to call the device hard_start_xmit routine. | |
2109 | * Used for flow control when transmit resources are available. | |
2110 | */ | |
79d16385 DM |
2111 | static inline void netif_wake_queue(struct net_device *dev) |
2112 | { | |
e8a0464c | 2113 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2114 | } |
2115 | ||
fd2ea0a7 DM |
2116 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
2117 | { | |
2118 | unsigned int i; | |
2119 | ||
2120 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2121 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2122 | netif_tx_wake_queue(txq); | |
2123 | } | |
2124 | } | |
2125 | ||
d29f749e DJ |
2126 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
2127 | { | |
18543a64 | 2128 | if (WARN_ON(!dev_queue)) { |
256ee435 | 2129 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); |
18543a64 GC |
2130 | return; |
2131 | } | |
73466498 | 2132 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2133 | } |
2134 | ||
bea3348e SH |
2135 | /** |
2136 | * netif_stop_queue - stop transmitting packets
2137 | * @dev: network device | |
2138 | * | |
2139 | * Stop upper layers calling the device hard_start_xmit routine. | |
2140 | * Used for flow control when transmit resources are unavailable. | |
2141 | */ | |
1da177e4 LT |
2142 | static inline void netif_stop_queue(struct net_device *dev) |
2143 | { | |
e8a0464c | 2144 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2145 | } |
2146 | ||
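/* Flow control sketch (hypothetical single-queue driver; the foo_ring_*
 * helpers are placeholders): ndo_start_xmit stops the queue when the TX ring
 * fills up, and the TX completion path wakes it once space is reclaimed.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		foo_post_to_ring(dev, skb);
 *		if (foo_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	and in the TX completion handler:
 *
 *	if (netif_queue_stopped(dev) && foo_ring_has_room(dev))
 *		netif_wake_queue(dev);
 */
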
fd2ea0a7 DM |
2147 | static inline void netif_tx_stop_all_queues(struct net_device *dev) |
2148 | { | |
2149 | unsigned int i; | |
2150 | ||
2151 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2152 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2153 | netif_tx_stop_queue(txq); | |
2154 | } | |
2155 | } | |
2156 | ||
4d29515f | 2157 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
d29f749e | 2158 | { |
73466498 | 2159 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2160 | } |
2161 | ||
bea3348e SH |
2162 | /** |
2163 | * netif_queue_stopped - test if transmit queue is flowblocked | |
2164 | * @dev: network device | |
2165 | * | |
2166 | * Test if transmit queue on device is currently unable to send. | |
2167 | */ | |
4d29515f | 2168 | static inline bool netif_queue_stopped(const struct net_device *dev) |
1da177e4 | 2169 | { |
e8a0464c | 2170 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2171 | } |
2172 | ||
4d29515f | 2173 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) |
c3f26a26 | 2174 | { |
73466498 TH |
2175 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; |
2176 | } | |
2177 | ||
4d29515f | 2178 | static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) |
73466498 TH |
2179 | { |
2180 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | |
2181 | } | |
2182 | ||
c5d67bd7 TH |
2183 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
2184 | unsigned int bytes) | |
2185 | { | |
114cf580 TH |
2186 | #ifdef CONFIG_BQL |
2187 | dql_queued(&dev_queue->dql, bytes); | |
b37c0fbe AD |
2188 | |
2189 | if (likely(dql_avail(&dev_queue->dql) >= 0)) | |
2190 | return; | |
2191 | ||
2192 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
2193 | ||
2194 | /* | |
2195 | * The XOFF flag must be set before checking the dql_avail below, | |
2196 | * because in netdev_tx_completed_queue we update the dql_completed | |
2197 | * before checking the XOFF flag. | |
2198 | */ | |
2199 | smp_mb(); | |
2200 | ||
2201 | /* check again in case another CPU has just made room avail */ | |
2202 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | |
2203 | clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
114cf580 | 2204 | #endif |
c5d67bd7 TH |
2205 | } |
2206 | ||
0042d0c8 FF |
2207 | /** |
2208 | * netdev_sent_queue - report the number of bytes queued to hardware | |
2209 | * @dev: network device | |
2210 | * @bytes: number of bytes queued to the hardware device queue | |
2211 | * | |
2212 | * Report the number of bytes queued for sending/completion to the network | |
2213 | * device hardware queue. @bytes should be a good approximation and should | |
2214 | * exactly match netdev_completed_queue() @bytes | |
2215 | */ | |
c5d67bd7 TH |
2216 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
2217 | { | |
2218 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); | |
2219 | } | |
2220 | ||
2221 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, | |
95c96174 | 2222 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 | 2223 | { |
114cf580 | 2224 | #ifdef CONFIG_BQL |
b37c0fbe AD |
2225 | if (unlikely(!bytes)) |
2226 | return; | |
2227 | ||
2228 | dql_completed(&dev_queue->dql, bytes); | |
2229 | ||
2230 | /* | |
2231 | * Without the memory barrier there is a small possibility that
2232 | * netdev_tx_sent_queue will miss the update and cause the queue to | |
2233 | * be stopped forever | |
2234 | */ | |
2235 | smp_mb(); | |
2236 | ||
2237 | if (dql_avail(&dev_queue->dql) < 0) | |
2238 | return; | |
2239 | ||
2240 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) | |
2241 | netif_schedule_queue(dev_queue); | |
114cf580 | 2242 | #endif |
c5d67bd7 TH |
2243 | } |
2244 | ||
0042d0c8 FF |
2245 | /** |
2246 | * netdev_completed_queue - report bytes and packets completed by device | |
2247 | * @dev: network device | |
2248 | * @pkts: actual number of packets sent over the medium | |
2249 | * @bytes: actual number of bytes sent over the medium | |
2250 | * | |
2251 | * Report the number of bytes and packets transmitted by the network device | |
2252 | * hardware queue over the physical medium, @bytes must exactly match the | |
2253 | * @bytes amount passed to netdev_sent_queue() | |
2254 | */ | |
c5d67bd7 | 2255 | static inline void netdev_completed_queue(struct net_device *dev, |
95c96174 | 2256 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 TH |
2257 | { |
2258 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | |
2259 | } | |
2260 | ||
2261 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | |
2262 | { | |
114cf580 | 2263 | #ifdef CONFIG_BQL |
5c490354 | 2264 | clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); |
114cf580 TH |
2265 | dql_reset(&q->dql); |
2266 | #endif | |
c5d67bd7 TH |
2267 | } |
2268 | ||
0042d0c8 FF |
2269 | /** |
2270 | * netdev_reset_queue - reset the packets and bytes count of a network device | |
2271 | * @dev_queue: network device | |
2272 | * | |
2273 | * Reset the bytes and packet count of a network device and clear the | |
2274 | * software flow control OFF bit for this network device | |
2275 | */ | |
c5d67bd7 TH |
2276 | static inline void netdev_reset_queue(struct net_device *dev_queue) |
2277 | { | |
2278 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | |
c3f26a26 DM |
2279 | } |
2280 | ||
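/* BQL usage sketch (assumed single-queue driver): bytes are reported when
 * descriptors are posted and again when the hardware completes them, so the
 * dynamic queue limit can converge; the queue is reset when the ring is
 * flushed.
 *
 *	in ndo_start_xmit, after queueing the skb on the ring:
 *		netdev_sent_queue(dev, skb->len);
 *
 *	in the TX completion handler, after reclaiming descriptors:
 *		netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 *	in the reset/down path, after dropping all pending descriptors:
 *		netdev_reset_queue(dev);
 */
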
b9507bda DB |
2281 | /** |
2282 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues | |
2283 | * @dev: network device | |
2284 | * @queue_index: given tx queue index | |
2285 | * | |
2286 | * Returns 0 if given tx queue index >= number of device tx queues, | |
2287 | * otherwise returns the originally passed tx queue index. | |
2288 | */ | |
2289 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) | |
2290 | { | |
2291 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | |
2292 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | |
2293 | dev->name, queue_index, | |
2294 | dev->real_num_tx_queues); | |
2295 | return 0; | |
2296 | } | |
2297 | ||
2298 | return queue_index; | |
2299 | } | |
2300 | ||
bea3348e SH |
2301 | /** |
2302 | * netif_running - test if up | |
2303 | * @dev: network device | |
2304 | * | |
2305 | * Test if the device has been brought up. | |
2306 | */ | |
4d29515f | 2307 | static inline bool netif_running(const struct net_device *dev) |
1da177e4 LT |
2308 | { |
2309 | return test_bit(__LINK_STATE_START, &dev->state); | |
2310 | } | |
2311 | ||
f25f4e44 PWJ |
2312 | /* |
2313 | * Routines to manage the subqueues on a device. We only need start | |
2314 | * stop, and a check if it's stopped. All other device management is | |
2315 | * done at the overall netdevice level. | |
2316 | * Also test the device if we're multiqueue. | |
2317 | */ | |
bea3348e SH |
2318 | |
2319 | /** | |
2320 | * netif_start_subqueue - allow sending packets on subqueue | |
2321 | * @dev: network device | |
2322 | * @queue_index: sub queue index | |
2323 | * | |
2324 | * Start individual transmit queue of a device with multiple transmit queues. | |
2325 | */ | |
f25f4e44 PWJ |
2326 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
2327 | { | |
fd2ea0a7 | 2328 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
2329 | |
2330 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
2331 | } |
2332 | ||
bea3348e SH |
2333 | /** |
2334 | * netif_stop_subqueue - stop sending packets on subqueue | |
2335 | * @dev: network device | |
2336 | * @queue_index: sub queue index | |
2337 | * | |
2338 | * Stop individual transmit queue of a device with multiple transmit queues. | |
2339 | */ | |
f25f4e44 PWJ |
2340 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
2341 | { | |
fd2ea0a7 | 2342 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
2343 | #ifdef CONFIG_NETPOLL_TRAP |
2344 | if (netpoll_trap()) | |
2345 | return; | |
2346 | #endif | |
7b3d3e4f | 2347 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
2348 | } |
2349 | ||
bea3348e SH |
2350 | /** |
2351 | * netif_subqueue_stopped - test status of subqueue | |
2352 | * @dev: network device | |
2353 | * @queue_index: sub queue index | |
2354 | * | |
2355 | * Check individual transmit queue of a device with multiple transmit queues. | |
2356 | */ | |
4d29515f DM |
2357 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, |
2358 | u16 queue_index) | |
f25f4e44 | 2359 | { |
fd2ea0a7 | 2360 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
2361 | |
2362 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
2363 | } |
2364 | ||
4d29515f DM |
2365 | static inline bool netif_subqueue_stopped(const struct net_device *dev, |
2366 | struct sk_buff *skb) | |
668f895a PE |
2367 | { |
2368 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
2369 | } | |
bea3348e SH |
2370 | |
2371 | /** | |
2372 | * netif_wake_subqueue - allow sending packets on subqueue | |
2373 | * @dev: network device | |
2374 | * @queue_index: sub queue index | |
2375 | * | |
2376 | * Resume individual transmit queue of a device with multiple transmit queues. | |
2377 | */ | |
f25f4e44 PWJ |
2378 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
2379 | { | |
fd2ea0a7 | 2380 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
f25f4e44 PWJ |
2381 | #ifdef CONFIG_NETPOLL_TRAP |
2382 | if (netpoll_trap()) | |
2383 | return; | |
2384 | #endif | |
73466498 | 2385 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) |
37437bb2 | 2386 | __netif_schedule(txq->qdisc); |
f25f4e44 PWJ |
2387 | } |
2388 | ||
537c00de | 2389 | #ifdef CONFIG_XPS |
53af53ae | 2390 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
f629d208 | 2391 | u16 index); |
537c00de AD |
2392 | #else |
2393 | static inline int netif_set_xps_queue(struct net_device *dev, | |
3573540c | 2394 | const struct cpumask *mask, |
537c00de AD |
2395 | u16 index) |
2396 | { | |
2397 | return 0; | |
2398 | } | |
2399 | #endif | |
2400 | ||
a3d22a68 VZ |
2401 | /* |
2402 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | |
2403 | * as a distribution range limit for the returned value. | |
2404 | */ | |
2405 | static inline u16 skb_tx_hash(const struct net_device *dev, | |
2406 | const struct sk_buff *skb) | |
2407 | { | |
2408 | return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); | |
2409 | } | |
2410 | ||
bea3348e SH |
2411 | /** |
2412 | * netif_is_multiqueue - test if device has multiple transmit queues | |
2413 | * @dev: network device | |
2414 | * | |
2415 | * Check if device has multiple transmit queues | |
bea3348e | 2416 | */ |
4d29515f | 2417 | static inline bool netif_is_multiqueue(const struct net_device *dev) |
f25f4e44 | 2418 | { |
a02cec21 | 2419 | return dev->num_tx_queues > 1; |
f25f4e44 | 2420 | } |
1da177e4 | 2421 | |
f629d208 | 2422 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); |
f0796d5c | 2423 | |
a953be53 | 2424 | #ifdef CONFIG_SYSFS |
f629d208 | 2425 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); |
62fe0b40 BH |
2426 | #else |
2427 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |
2428 | unsigned int rxq) | |
2429 | { | |
2430 | return 0; | |
2431 | } | |
2432 | #endif | |
2433 | ||
3171d026 BH |
2434 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, |
2435 | const struct net_device *from_dev) | |
2436 | { | |
ee6ae1a1 JP |
2437 | int err; |
2438 | ||
2439 | err = netif_set_real_num_tx_queues(to_dev, | |
2440 | from_dev->real_num_tx_queues); | |
2441 | if (err) | |
2442 | return err; | |
a953be53 | 2443 | #ifdef CONFIG_SYSFS |
3171d026 BH |
2444 | return netif_set_real_num_rx_queues(to_dev, |
2445 | from_dev->real_num_rx_queues); | |
2446 | #else | |
2447 | return 0; | |
2448 | #endif | |
2449 | } | |
2450 | ||
a953be53 MD |
2451 | #ifdef CONFIG_SYSFS |
2452 | static inline unsigned int get_netdev_rx_queue_index( | |
2453 | struct netdev_rx_queue *queue) | |
2454 | { | |
2455 | struct net_device *dev = queue->dev; | |
2456 | int index = queue - dev->_rx; | |
2457 | ||
2458 | BUG_ON(index >= dev->num_rx_queues); | |
2459 | return index; | |
2460 | } | |
2461 | #endif | |
2462 | ||
16917b87 | 2463 | #define DEFAULT_MAX_NUM_RSS_QUEUES (8) |
f629d208 | 2464 | int netif_get_num_default_rss_queues(void); |
16917b87 | 2465 | |
e6247027 ED |
2466 | enum skb_free_reason { |
2467 | SKB_REASON_CONSUMED, | |
2468 | SKB_REASON_DROPPED, | |
2469 | }; | |
2470 | ||
2471 | void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); | |
2472 | void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); | |
1da177e4 | 2473 | |
e6247027 ED |
2474 | /* |
2475 | * It is not allowed to call kfree_skb() or consume_skb() from hardware | |
2476 | * interrupt context or with hardware interrupts being disabled. | |
2477 | * (in_irq() || irqs_disabled()) | |
2478 | * | |
2479 | * We provide four helpers that can be used in following contexts : | |
2480 | * | |
2481 | * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, | |
2482 | * replacing kfree_skb(skb) | |
2483 | * | |
2484 | * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. | |
2485 | * Typically used in place of consume_skb(skb) in TX completion path | |
2486 | * | |
2487 | * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, | |
2488 | * replacing kfree_skb(skb) | |
2489 | * | |
2490 | * dev_consume_skb_any(skb) when caller doesn't know its current irq context, | |
2491 | * and consumed a packet. Used in place of consume_skb(skb) | |
1da177e4 | 2492 | */ |
e6247027 ED |
2493 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
2494 | { | |
2495 | __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); | |
2496 | } | |
2497 | ||
2498 | static inline void dev_consume_skb_irq(struct sk_buff *skb) | |
2499 | { | |
2500 | __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); | |
2501 | } | |
2502 | ||
2503 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | |
2504 | { | |
2505 | __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); | |
2506 | } | |
2507 | ||
2508 | static inline void dev_consume_skb_any(struct sk_buff *skb) | |
2509 | { | |
2510 | __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); | |
2511 | } | |
1da177e4 | 2512 | |
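/* Usage sketch: a TX reclaim routine that may run in any context frees
 * successfully transmitted buffers with the consume variant and aborted ones
 * with the kfree variant, keeping drop/consume tracing accurate
 * (tx_ok is a placeholder for the driver's completion status).
 *
 *	if (tx_ok)
 *		dev_consume_skb_any(skb);
 *	else
 *		dev_kfree_skb_any(skb);
 */
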
f629d208 JP |
2513 | int netif_rx(struct sk_buff *skb); |
2514 | int netif_rx_ni(struct sk_buff *skb); | |
2515 | int netif_receive_skb(struct sk_buff *skb); | |
2516 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); | |
2517 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | |
2518 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | |
2519 | gro_result_t napi_gro_frags(struct napi_struct *napi); | |
bf5a755f JC |
2520 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
2521 | struct packet_offload *gro_find_complete_by_type(__be16 type); | |
76620aaf HX |
2522 | |
2523 | static inline void napi_free_frags(struct napi_struct *napi) | |
2524 | { | |
2525 | kfree_skb(napi->skb); | |
2526 | napi->skb = NULL; | |
2527 | } | |
2528 | ||
f629d208 JP |
2529 | int netdev_rx_handler_register(struct net_device *dev, |
2530 | rx_handler_func_t *rx_handler, | |
2531 | void *rx_handler_data); | |
2532 | void netdev_rx_handler_unregister(struct net_device *dev); | |
2533 | ||
2534 | bool dev_valid_name(const char *name); | |
2535 | int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | |
2536 | int dev_ethtool(struct net *net, struct ifreq *); | |
2537 | unsigned int dev_get_flags(const struct net_device *); | |
2538 | int __dev_change_flags(struct net_device *, unsigned int flags); | |
2539 | int dev_change_flags(struct net_device *, unsigned int); | |
cb178190 DM |
2540 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
2541 | unsigned int gchanges); | |
f629d208 JP |
2542 | int dev_change_name(struct net_device *, const char *); |
2543 | int dev_set_alias(struct net_device *, const char *, size_t); | |
2544 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); | |
2545 | int dev_set_mtu(struct net_device *, int); | |
2546 | void dev_set_group(struct net_device *, int); | |
2547 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | |
2548 | int dev_change_carrier(struct net_device *, bool new_carrier); | |
2549 | int dev_get_phys_port_id(struct net_device *dev, | |
2550 | struct netdev_phys_port_id *ppid); | |
2551 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |
f663dd9a | 2552 | struct netdev_queue *txq); |
f629d208 | 2553 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
1da177e4 | 2554 | |
20380731 | 2555 | extern int netdev_budget; |
1da177e4 LT |
2556 | |
2557 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
f629d208 | 2558 | void netdev_run_todo(void); |
1da177e4 | 2559 | |
bea3348e SH |
2560 | /** |
2561 | * dev_put - release reference to device | |
2562 | * @dev: network device | |
2563 | * | |
9ef4429b | 2564 | * Release reference to device to allow it to be freed. |
bea3348e | 2565 | */ |
1da177e4 LT |
2566 | static inline void dev_put(struct net_device *dev) |
2567 | { | |
933393f5 | 2568 | this_cpu_dec(*dev->pcpu_refcnt); |
1da177e4 LT |
2569 | } |
2570 | ||
bea3348e SH |
2571 | /** |
2572 | * dev_hold - get reference to device | |
2573 | * @dev: network device | |
2574 | * | |
9ef4429b | 2575 | * Hold reference to device to keep it from being freed. |
bea3348e | 2576 | */ |
15333061 SH |
2577 | static inline void dev_hold(struct net_device *dev) |
2578 | { | |
933393f5 | 2579 | this_cpu_inc(*dev->pcpu_refcnt); |
15333061 | 2580 | } |
1da177e4 LT |
2581 | |
2582 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
2583 | * and _off may be called from IRQ context, but it is caller | |
2584 | * who is responsible for serialization of these calls. | |
b00055aa SR |
2585 | * |
2586 | * The name carrier is inappropriate, these functions should really be | |
2587 | * called netif_lowerlayer_*() because they represent the state of any | |
2588 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
2589 | */ |
2590 | ||
f629d208 JP |
2591 | void linkwatch_init_dev(struct net_device *dev); |
2592 | void linkwatch_fire_event(struct net_device *dev); | |
2593 | void linkwatch_forget_dev(struct net_device *dev); | |
1da177e4 | 2594 | |
bea3348e SH |
2595 | /** |
2596 | * netif_carrier_ok - test if carrier present | |
2597 | * @dev: network device | |
2598 | * | |
2599 | * Check if carrier is present on device | |
2600 | */ | |
4d29515f | 2601 | static inline bool netif_carrier_ok(const struct net_device *dev) |
1da177e4 LT |
2602 | { |
2603 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
2604 | } | |
2605 | ||
f629d208 | 2606 | unsigned long dev_trans_start(struct net_device *dev); |
9d21493b | 2607 | |
f629d208 | 2608 | void __netdev_watchdog_up(struct net_device *dev); |
1da177e4 | 2609 | |
f629d208 | 2610 | void netif_carrier_on(struct net_device *dev); |
1da177e4 | 2611 | |
f629d208 | 2612 | void netif_carrier_off(struct net_device *dev); |
1da177e4 | 2613 | |
bea3348e SH |
2614 | /** |
2615 | * netif_dormant_on - mark device as dormant. | |
2616 | * @dev: network device | |
2617 | * | |
2618 | * Mark device as dormant (as per RFC2863). | |
2619 | * | |
2620 | * The dormant state indicates that the relevant interface is not | |
2621 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
2622 | * in a "pending" state, waiting for some external event. For "on- | |
2623 | * demand" interfaces, this new state identifies the situation where the | |
2624 | * interface is waiting for events to place it in the up state. | |
2625 | * | |
2626 | */ | |
b00055aa SR |
2627 | static inline void netif_dormant_on(struct net_device *dev) |
2628 | { | |
2629 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
2630 | linkwatch_fire_event(dev); | |
2631 | } | |
2632 | ||
bea3348e SH |
2633 | /** |
2634 | * netif_dormant_off - set device as not dormant. | |
2635 | * @dev: network device | |
2636 | * | |
2637 | * Mark device as no longer dormant. |
2638 | */ | |
b00055aa SR |
2639 | static inline void netif_dormant_off(struct net_device *dev) |
2640 | { | |
2641 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
2642 | linkwatch_fire_event(dev); | |
2643 | } | |
2644 | ||
bea3348e SH |
2645 | /** |
2646 | * netif_dormant - test if device is dormant |
2647 | * @dev: network device |
2648 | * |
2649 | * Check if the device is in the RFC 2863 dormant state. |
2650 | */ | |
4d29515f | 2651 | static inline bool netif_dormant(const struct net_device *dev) |
b00055aa SR |
2652 | { |
2653 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
2654 | } | |
2655 | ||
2656 | ||
bea3348e SH |
2657 | /** |
2658 | * netif_oper_up - test if device is operational | |
2659 | * @dev: network device | |
2660 | * | |
2661 | * Check if the device's RFC 2863 operational state is up. |
2662 | */ | |
4d29515f | 2663 | static inline bool netif_oper_up(const struct net_device *dev) |
d94d9fee | 2664 | { |
b00055aa SR |
2665 | return (dev->operstate == IF_OPER_UP || |
2666 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
2667 | } | |
2668 | ||
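/*
 * Example usage (editorial sketch): an interface that must complete some
 * external step (e.g. 802.1X-style authentication) before it can pass
 * traffic can advertise the RFC 2863 dormant state while waiting, and
 * clear it once the event arrives.  The foo_* names are hypothetical.
 */
static void foo_auth_started(struct net_device *dev)
{
	netif_dormant_on(dev);		/* operstate reported as dormant */
}

static void foo_auth_finished(struct net_device *dev)
{
	netif_dormant_off(dev);
	if (netif_oper_up(dev))
		netdev_info(dev, "interface is now operational\n");
}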
bea3348e SH |
2669 | /** |
2670 | * netif_device_present - is device available or removed | |
2671 | * @dev: network device | |
2672 | * | |
2673 | * Check if device has not been removed from system. | |
2674 | */ | |
4d29515f | 2675 | static inline bool netif_device_present(struct net_device *dev) |
1da177e4 LT |
2676 | { |
2677 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
2678 | } | |
2679 | ||
f629d208 | 2680 | void netif_device_detach(struct net_device *dev); |
1da177e4 | 2681 | |
f629d208 | 2682 | void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
2683 | |
2684 | /* | |
2685 | * Network interface message level settings | |
2686 | */ | |
1da177e4 LT |
2687 | |
2688 | enum { | |
2689 | NETIF_MSG_DRV = 0x0001, | |
2690 | NETIF_MSG_PROBE = 0x0002, | |
2691 | NETIF_MSG_LINK = 0x0004, | |
2692 | NETIF_MSG_TIMER = 0x0008, | |
2693 | NETIF_MSG_IFDOWN = 0x0010, | |
2694 | NETIF_MSG_IFUP = 0x0020, | |
2695 | NETIF_MSG_RX_ERR = 0x0040, | |
2696 | NETIF_MSG_TX_ERR = 0x0080, | |
2697 | NETIF_MSG_TX_QUEUED = 0x0100, | |
2698 | NETIF_MSG_INTR = 0x0200, | |
2699 | NETIF_MSG_TX_DONE = 0x0400, | |
2700 | NETIF_MSG_RX_STATUS = 0x0800, | |
2701 | NETIF_MSG_PKTDATA = 0x1000, | |
2702 | NETIF_MSG_HW = 0x2000, | |
2703 | NETIF_MSG_WOL = 0x4000, | |
2704 | }; | |
2705 | ||
2706 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
2707 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
2708 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
2709 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
2710 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
2711 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
2712 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
2713 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
2714 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
2715 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
2716 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
2717 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
2718 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
2719 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
2720 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
2721 | ||
2722 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
2723 | { | |
2724 | /* use default */ | |
2725 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
2726 | return default_msg_enable_bits; | |
2727 | if (debug_value == 0) /* no output */ | |
2728 | return 0; | |
2729 | /* set low N bits */ | |
2730 | return (1 << debug_value) - 1; | |
2731 | } | |
2732 | ||
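/*
 * Example usage (editorial, the conventional pattern): expose a "debug"
 * module parameter and convert it into an msg_enable bitmap with
 * netif_msg_init().  FOO_DEF_MSG_ENABLE, foo_init_msg_level() and
 * struct foo_priv are made-up names.
 */
#define FOO_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;		/* -1 selects FOO_DEF_MSG_ENABLE */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level (0 = none, N > 0 = low N bits set)");

static void foo_init_msg_level(struct foo_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug, FOO_DEF_MSG_ENABLE);
}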
c773e847 | 2733 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 2734 | { |
c773e847 DM |
2735 | spin_lock(&txq->_xmit_lock); |
2736 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
2737 | } |
2738 | ||
fd2ea0a7 DM |
2739 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
2740 | { | |
2741 | spin_lock_bh(&txq->_xmit_lock); | |
2742 | txq->xmit_lock_owner = smp_processor_id(); | |
2743 | } | |
2744 | ||
4d29515f | 2745 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
c3f26a26 | 2746 | { |
4d29515f | 2747 | bool ok = spin_trylock(&txq->_xmit_lock); |
c3f26a26 DM |
2748 | if (likely(ok)) |
2749 | txq->xmit_lock_owner = smp_processor_id(); | |
2750 | return ok; | |
2751 | } | |
2752 | ||
2753 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
2754 | { | |
2755 | txq->xmit_lock_owner = -1; | |
2756 | spin_unlock(&txq->_xmit_lock); | |
2757 | } | |
2758 | ||
2759 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
2760 | { | |
2761 | txq->xmit_lock_owner = -1; | |
2762 | spin_unlock_bh(&txq->_xmit_lock); | |
2763 | } | |
2764 | ||
08baf561 ED |
2765 | static inline void txq_trans_update(struct netdev_queue *txq) |
2766 | { | |
2767 | if (txq->xmit_lock_owner != -1) | |
2768 | txq->trans_start = jiffies; | |
2769 | } | |
2770 | ||
d29f749e DJ |
2771 | /** |
2772 | * netif_tx_lock - grab network device transmit lock | |
2773 | * @dev: network device | |
d29f749e DJ |
2774 | * |
2775 | * Get network device transmit lock | |
2776 | */ | |
22dd7495 JHS |
2777 | static inline void netif_tx_lock(struct net_device *dev) |
2778 | { | |
e8a0464c | 2779 | unsigned int i; |
c3f26a26 | 2780 | int cpu; |
c773e847 | 2781 | |
c3f26a26 DM |
2782 | spin_lock(&dev->tx_global_lock); |
2783 | cpu = smp_processor_id(); | |
e8a0464c DM |
2784 | for (i = 0; i < dev->num_tx_queues; i++) { |
2785 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
2786 | |
2787 | /* We are the only thread of execution doing a | |
2788 | * freeze, but we have to grab the _xmit_lock in | |
2789 | * order to synchronize with threads which are in | |
2790 | * the ->hard_start_xmit() handler and already | |
2791 | * checked the frozen bit. | |
2792 | */ | |
e8a0464c | 2793 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
2794 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
2795 | __netif_tx_unlock(txq); | |
e8a0464c | 2796 | } |
932ff279 HX |
2797 | } |
2798 | ||
2799 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
2800 | { | |
e8a0464c DM |
2801 | local_bh_disable(); |
2802 | netif_tx_lock(dev); | |
932ff279 HX |
2803 | } |
2804 | ||
932ff279 HX |
2805 | static inline void netif_tx_unlock(struct net_device *dev) |
2806 | { | |
e8a0464c DM |
2807 | unsigned int i; |
2808 | ||
2809 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2810 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 2811 | |
c3f26a26 DM |
2812 | /* No need to grab the _xmit_lock here. If the |
2813 | * queue is not stopped for another reason, we | |
2814 | * force a schedule. | |
2815 | */ | |
2816 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 2817 | netif_schedule_queue(txq); |
c3f26a26 DM |
2818 | } |
2819 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
2820 | } |
2821 | ||
2822 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
2823 | { | |
e8a0464c DM |
2824 | netif_tx_unlock(dev); |
2825 | local_bh_enable(); | |
932ff279 HX |
2826 | } |
2827 | ||
c773e847 | 2828 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 2829 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 2830 | __netif_tx_lock(txq, cpu); \ |
22dd7495 JHS |
2831 | } \ |
2832 | } | |
2833 | ||
c773e847 | 2834 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 2835 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 2836 | __netif_tx_unlock(txq); \ |
22dd7495 JHS |
2837 | } \ |
2838 | } | |
2839 | ||
1da177e4 LT |
2840 | static inline void netif_tx_disable(struct net_device *dev) |
2841 | { | |
fd2ea0a7 | 2842 | unsigned int i; |
c3f26a26 | 2843 | int cpu; |
fd2ea0a7 | 2844 | |
c3f26a26 DM |
2845 | local_bh_disable(); |
2846 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
2847 | for (i = 0; i < dev->num_tx_queues; i++) { |
2848 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
2849 | |
2850 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 2851 | netif_tx_stop_queue(txq); |
c3f26a26 | 2852 | __netif_tx_unlock(txq); |
fd2ea0a7 | 2853 | } |
c3f26a26 | 2854 | local_bh_enable(); |
1da177e4 LT |
2855 | } |
2856 | ||
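/*
 * Example usage (editorial sketch): netif_tx_disable() is typically used
 * on the down path, before TX resources are torn down, so that no new
 * transmission can be started on any queue.  The foo_* helpers and
 * struct foo_priv are hypothetical.
 */
static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_tx_disable(dev);		/* stop every TX queue under its xmit lock */
	foo_disable_irqs(priv);
	foo_free_tx_rings(priv);
	return 0;
}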
e308a5d8 DM |
2857 | static inline void netif_addr_lock(struct net_device *dev) |
2858 | { | |
2859 | spin_lock(&dev->addr_list_lock); | |
2860 | } | |
2861 | ||
2429f7ac JP |
2862 | static inline void netif_addr_lock_nested(struct net_device *dev) |
2863 | { | |
2864 | spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); | |
2865 | } | |
2866 | ||
e308a5d8 DM |
2867 | static inline void netif_addr_lock_bh(struct net_device *dev) |
2868 | { | |
2869 | spin_lock_bh(&dev->addr_list_lock); | |
2870 | } | |
2871 | ||
2872 | static inline void netif_addr_unlock(struct net_device *dev) | |
2873 | { | |
2874 | spin_unlock(&dev->addr_list_lock); | |
2875 | } | |
2876 | ||
2877 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
2878 | { | |
2879 | spin_unlock_bh(&dev->addr_list_lock); | |
2880 | } | |
2881 | ||
f001fde5 | 2882 | /* |
31278e71 | 2883 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
2884 | * rcu_read_lock held. |
2885 | */ | |
2886 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 2887 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 2888 | |
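/*
 * Example usage (editorial): a read-only walk of the device address list,
 * under rcu_read_lock() as required above.  foo_dump_dev_addrs() is a
 * made-up name.
 */
static void foo_dump_dev_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		netdev_info(dev, "hw addr %pM (type %d)\n", ha->addr, ha->type);
	rcu_read_unlock();
}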
1da177e4 LT |
2889 | /* These functions live elsewhere (drivers/net/net_init.c), but are related. */ |
2890 | ||
f629d208 | 2891 | void ether_setup(struct net_device *dev); |
1da177e4 LT |
2892 | |
2893 | /* Support for loadable net-drivers */ | |
f629d208 JP |
2894 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
2895 | void (*setup)(struct net_device *), | |
2896 | unsigned int txqs, unsigned int rxqs); | |
f25f4e44 | 2897 | #define alloc_netdev(sizeof_priv, name, setup) \ |
36909ea4 TH |
2898 | alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) |
2899 | ||
2900 | #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ | |
2901 | alloc_netdev_mqs(sizeof_priv, name, setup, count, count) | |
2902 | ||
f629d208 JP |
2903 | int register_netdev(struct net_device *dev); |
2904 | void unregister_netdev(struct net_device *dev); | |
f001fde5 | 2905 | |
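/*
 * Example usage (editorial sketch): the usual net_device lifecycle for a
 * simple Ethernet-style driver.  struct foo_priv and foo_netdev_ops are
 * hypothetical; a real driver would also wire up its bus device, IRQs, etc.
 */
static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &foo_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}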
22bedad3 | 2906 | /* General hardware address lists handling functions */ |
f629d208 JP |
2907 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
2908 | struct netdev_hw_addr_list *from_list, int addr_len); | |
2909 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |
2910 | struct netdev_hw_addr_list *from_list, int addr_len); | |
f629d208 | 2911 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
22bedad3 | 2912 | |
f001fde5 | 2913 | /* Functions used for device addresses handling */ |
f629d208 JP |
2914 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
2915 | unsigned char addr_type); | |
2916 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, | |
2917 | unsigned char addr_type); | |
f629d208 JP |
2918 | void dev_addr_flush(struct net_device *dev); |
2919 | int dev_addr_init(struct net_device *dev); | |
a748ee24 JP |
2920 | |
2921 | /* Functions used for unicast addresses handling */ | |
f629d208 JP |
2922 | int dev_uc_add(struct net_device *dev, const unsigned char *addr); |
2923 | int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); | |
2924 | int dev_uc_del(struct net_device *dev, const unsigned char *addr); | |
2925 | int dev_uc_sync(struct net_device *to, struct net_device *from); | |
2926 | int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); | |
2927 | void dev_uc_unsync(struct net_device *to, struct net_device *from); | |
2928 | void dev_uc_flush(struct net_device *dev); | |
2929 | void dev_uc_init(struct net_device *dev); | |
f001fde5 | 2930 | |
22bedad3 | 2931 | /* Functions used for multicast addresses handling */ |
f629d208 JP |
2932 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
2933 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | |
2934 | int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); | |
2935 | int dev_mc_del(struct net_device *dev, const unsigned char *addr); | |
2936 | int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); | |
2937 | int dev_mc_sync(struct net_device *to, struct net_device *from); | |
2938 | int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); | |
2939 | void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
2940 | void dev_mc_flush(struct net_device *dev); | |
2941 | void dev_mc_init(struct net_device *dev); | |
f001fde5 | 2942 | |
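/*
 * Example usage (editorial sketch): a stacked device (VLAN/macvlan style)
 * usually propagates its secondary unicast and multicast lists to the
 * underlying device from its ndo_set_rx_mode() callback; the matching
 * dev_uc_unsync()/dev_mc_unsync() calls belong on the teardown path.
 * struct foo_priv and its real_dev member are hypothetical.
 */
static void foo_set_rx_mode(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	dev_uc_sync(priv->real_dev, dev);	/* push unicast addresses down */
	dev_mc_sync(priv->real_dev, dev);	/* push multicast memberships down */
}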
4417da66 | 2943 | /* Functions used for secondary unicast and multicast support */ |
f629d208 JP |
2944 | void dev_set_rx_mode(struct net_device *dev); |
2945 | void __dev_set_rx_mode(struct net_device *dev); | |
2946 | int dev_set_promiscuity(struct net_device *dev, int inc); | |
2947 | int dev_set_allmulti(struct net_device *dev, int inc); | |
2948 | void netdev_state_change(struct net_device *dev); | |
2949 | void netdev_notify_peers(struct net_device *dev); | |
2950 | void netdev_features_change(struct net_device *dev); | |
1da177e4 | 2951 | /* Load a device via the kmod */ |
f629d208 JP |
2952 | void dev_load(struct net *net, const char *name); |
2953 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |
2954 | struct rtnl_link_stats64 *storage); | |
2955 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | |
2956 | const struct net_device_stats *netdev_stats); | |
eeda3fd6 | 2957 | |
1da177e4 | 2958 | extern int netdev_max_backlog; |
3b098e2d | 2959 | extern int netdev_tstamp_prequeue; |
1da177e4 | 2960 | extern int weight_p; |
0a14842f | 2961 | extern int bpf_jit_enable; |
9ff162a8 | 2962 | |
f629d208 | 2963 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
f629d208 JP |
2964 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
2965 | struct list_head **iter); | |
8b5be856 VF |
2966 | |
2967 | /* iterate through upper list, must be called under RCU read lock */ | |
2f268f12 VF |
2968 | #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ |
2969 | for (iter = &(dev)->all_adj_list.upper, \ | |
2970 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ | |
2971 | updev; \ | |
2972 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) | |
8b5be856 | 2973 | |
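/*
 * Example usage (editorial): counting every device stacked above @dev
 * (bond, bridge, VLAN, ...).  As noted above, the iterator must run under
 * rcu_read_lock().  foo_count_uppers() is a made-up name.
 */
static int foo_count_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(dev, upper, iter)
		n++;
	rcu_read_unlock();

	return n;
}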
f629d208 JP |
2974 | void *netdev_lower_get_next_private(struct net_device *dev, |
2975 | struct list_head **iter); | |
2976 | void *netdev_lower_get_next_private_rcu(struct net_device *dev, | |
2977 | struct list_head **iter); | |
31088a11 VF |
2978 | |
2979 | #define netdev_for_each_lower_private(dev, priv, iter) \ | |
2980 | for (iter = (dev)->adj_list.lower.next, \ | |
2981 | priv = netdev_lower_get_next_private(dev, &(iter)); \ | |
2982 | priv; \ | |
2983 | priv = netdev_lower_get_next_private(dev, &(iter))) | |
2984 | ||
2985 | #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ | |
2986 | for (iter = &(dev)->adj_list.lower, \ | |
2987 | priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ | |
2988 | priv; \ | |
2989 | priv = netdev_lower_get_next_private_rcu(dev, &(iter))) | |
2990 | ||
f629d208 | 2991 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
e001bfad | 2992 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
f629d208 JP |
2993 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
2994 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | |
2995 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); | |
2996 | int netdev_master_upper_dev_link(struct net_device *dev, | |
9ff162a8 | 2997 | struct net_device *upper_dev); |
f629d208 JP |
2998 | int netdev_master_upper_dev_link_private(struct net_device *dev, |
2999 | struct net_device *upper_dev, | |
3000 | void *private); | |
3001 | void netdev_upper_dev_unlink(struct net_device *dev, | |
3002 | struct net_device *upper_dev); | |
5bb025fa | 3003 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
f629d208 JP |
3004 | void *netdev_lower_dev_get_private(struct net_device *dev, |
3005 | struct net_device *lower_dev); | |
3006 | int skb_checksum_help(struct sk_buff *skb); | |
3007 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, | |
3008 | netdev_features_t features, bool tx_path); | |
3009 | struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |
3010 | netdev_features_t features); | |
12b0004d CW |
3011 | |
3012 | static inline | |
3013 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |
3014 | { | |
3015 | return __skb_gso_segment(skb, features, true); | |
3016 | } | |
ec5f0615 PS |
3017 | __be16 skb_network_protocol(struct sk_buff *skb); |
3018 | ||
3019 | static inline bool can_checksum_protocol(netdev_features_t features, | |
3020 | __be16 protocol) | |
3021 | { | |
3022 | return ((features & NETIF_F_GEN_CSUM) || | |
3023 | ((features & NETIF_F_V4_CSUM) && | |
3024 | protocol == htons(ETH_P_IP)) || | |
3025 | ((features & NETIF_F_V6_CSUM) && | |
3026 | protocol == htons(ETH_P_IPV6)) || | |
3027 | ((features & NETIF_F_FCOE_CRC) && | |
3028 | protocol == htons(ETH_P_FCOE))); | |
3029 | } | |
12b0004d | 3030 | |
fb286bb2 | 3031 | #ifdef CONFIG_BUG |
f629d208 | 3032 | void netdev_rx_csum_fault(struct net_device *dev); |
fb286bb2 HX |
3033 | #else |
3034 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
3035 | { | |
3036 | } | |
3037 | #endif | |
1da177e4 | 3038 | /* rx skb timestamps */ |
f629d208 JP |
3039 | void net_enable_timestamp(void); |
3040 | void net_disable_timestamp(void); | |
1da177e4 | 3041 | |
20380731 | 3042 | #ifdef CONFIG_PROC_FS |
f629d208 | 3043 | int __init dev_proc_init(void); |
900ff8c6 CW |
3044 | #else |
3045 | #define dev_proc_init() 0 | |
20380731 ACM |
3046 | #endif |
3047 | ||
42a2d923 LT |
3048 | int netdev_class_create_file_ns(struct class_attribute *class_attr, |
3049 | const void *ns); | |
3050 | void netdev_class_remove_file_ns(struct class_attribute *class_attr, | |
3051 | const void *ns); | |
58292cbe TH |
3052 | |
3053 | static inline int netdev_class_create_file(struct class_attribute *class_attr) | |
3054 | { | |
3055 | return netdev_class_create_file_ns(class_attr, NULL); | |
3056 | } | |
3057 | ||
3058 | static inline void netdev_class_remove_file(struct class_attribute *class_attr) | |
3059 | { | |
3060 | netdev_class_remove_file_ns(class_attr, NULL); | |
3061 | } | |
b8a9787e | 3062 | |
04600794 JB |
3063 | extern struct kobj_ns_type_operations net_ns_type_operations; |
3064 | ||
f629d208 | 3065 | const char *netdev_drivername(const struct net_device *dev); |
6579e57b | 3066 | |
f629d208 | 3067 | void linkwatch_run_queue(void); |
20380731 | 3068 | |
c8f44aff MM |
3069 | static inline netdev_features_t netdev_get_wanted_features( |
3070 | struct net_device *dev) | |
5455c699 MM |
3071 | { |
3072 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | |
3073 | } | |
c8f44aff MM |
3074 | netdev_features_t netdev_increment_features(netdev_features_t all, |
3075 | netdev_features_t one, netdev_features_t mask); | |
b0ce3508 ED |
3076 | |
3077 | /* Allow TSO to be used on stacked devices: |
3078 | * performing the GSO segmentation before the last device |
3079 | * is a performance improvement. | |
3080 | */ | |
3081 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, | |
3082 | netdev_features_t mask) | |
3083 | { | |
3084 | return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); | |
3085 | } | |
3086 | ||
6cb6a27c | 3087 | int __netdev_update_features(struct net_device *dev); |
5455c699 | 3088 | void netdev_update_features(struct net_device *dev); |
afe12cc8 | 3089 | void netdev_change_features(struct net_device *dev); |
7f353bf2 | 3090 | |
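/*
 * Example usage (editorial sketch): a driver whose offloads depend on MTU
 * can veto features in ndo_fix_features() and ask the core to re-evaluate
 * (the callers below run under RTNL) whenever that state changes.
 * FOO_MAX_CSUM_MTU and the foo_* callbacks are hypothetical.
 */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (dev->mtu > FOO_MAX_CSUM_MTU)
		features &= ~NETIF_F_ALL_CSUM;	/* no checksum offload for jumbo frames */
	return features;
}

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* re-runs ndo_fix_features and notifies */
	return 0;
}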
fc4a7489 PM |
3091 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
3092 | struct net_device *dev); | |
3093 | ||
d2069403 FW |
3094 | netdev_features_t netif_skb_dev_features(struct sk_buff *skb, |
3095 | const struct net_device *dev); | |
3096 | static inline netdev_features_t netif_skb_features(struct sk_buff *skb) | |
3097 | { | |
3098 | return netif_skb_dev_features(skb, skb->dev); | |
3099 | } | |
58e998c6 | 3100 | |
4d29515f | 3101 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
576a30eb | 3102 | { |
c8f44aff | 3103 | netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; |
0345e186 MM |
3104 | |
3105 | /* check flags correspondence */ | |
3106 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | |
3107 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); | |
3108 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); | |
3109 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | |
3110 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); | |
3111 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | |
3112 | ||
d6b4991a | 3113 | return (features & feature) == feature; |
576a30eb HX |
3114 | } |
3115 | ||
4d29515f | 3116 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
bcd76111 | 3117 | { |
278b2513 | 3118 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
21dc3301 | 3119 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
3120 | } |
3121 | ||
4d29515f DM |
3122 | static inline bool netif_needs_gso(struct sk_buff *skb, |
3123 | netdev_features_t features) | |
7967168c | 3124 | { |
fc741216 | 3125 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
cdbee74c YZ |
3126 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && |
3127 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); | |
7967168c HX |
3128 | } |
3129 | ||
82cc1a7a PWJ |
3130 | static inline void netif_set_gso_max_size(struct net_device *dev, |
3131 | unsigned int size) | |
3132 | { | |
3133 | dev->gso_max_size = size; | |
3134 | } | |
3135 | ||
7a7ffbab WCC |
3136 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, |
3137 | int pulled_hlen, u16 mac_offset, | |
3138 | int mac_len) | |
3139 | { | |
3140 | skb->protocol = protocol; | |
3141 | skb->encapsulation = 1; | |
3142 | skb_push(skb, pulled_hlen); | |
3143 | skb_reset_transport_header(skb); | |
3144 | skb->mac_header = mac_offset; | |
3145 | skb->network_header = skb->mac_header + mac_len; | |
3146 | skb->mac_len = mac_len; | |
3147 | } | |
3148 | ||
a6cc0cfa JF |
3149 | static inline bool netif_is_macvlan(struct net_device *dev) |
3150 | { | |
3151 | return dev->priv_flags & IFF_MACVLAN; | |
3152 | } | |
3153 | ||
8a7fbfab | 3154 | static inline bool netif_is_bond_master(struct net_device *dev) |
3155 | { | |
3156 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | |
3157 | } | |
3158 | ||
4d29515f | 3159 | static inline bool netif_is_bond_slave(struct net_device *dev) |
1765a575 JP |
3160 | { |
3161 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | |
3162 | } | |
3163 | ||
3bdc0eba BG |
3164 | static inline bool netif_supports_nofcs(struct net_device *dev) |
3165 | { | |
3166 | return dev->priv_flags & IFF_SUPP_NOFCS; | |
3167 | } | |
3168 | ||
505d4f73 | 3169 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 | 3170 | |
571ba423 JP |
3171 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
3172 | ||
3173 | /* netdev_printk helpers, similar to dev_printk */ | |
3174 | ||
3175 | static inline const char *netdev_name(const struct net_device *dev) | |
3176 | { | |
3177 | if (dev->reg_state != NETREG_REGISTERED) | |
3178 | return "(unregistered net_device)"; | |
3179 | return dev->name; | |
3180 | } | |
3181 | ||
f629d208 | 3182 | __printf(3, 4) |
b9075fa9 JP |
3183 | int netdev_printk(const char *level, const struct net_device *dev, |
3184 | const char *format, ...); | |
f629d208 | 3185 | __printf(2, 3) |
b9075fa9 | 3186 | int netdev_emerg(const struct net_device *dev, const char *format, ...); |
f629d208 | 3187 | __printf(2, 3) |
b9075fa9 | 3188 | int netdev_alert(const struct net_device *dev, const char *format, ...); |
f629d208 | 3189 | __printf(2, 3) |
b9075fa9 | 3190 | int netdev_crit(const struct net_device *dev, const char *format, ...); |
f629d208 | 3191 | __printf(2, 3) |
b9075fa9 | 3192 | int netdev_err(const struct net_device *dev, const char *format, ...); |
f629d208 | 3193 | __printf(2, 3) |
b9075fa9 | 3194 | int netdev_warn(const struct net_device *dev, const char *format, ...); |
f629d208 | 3195 | __printf(2, 3) |
b9075fa9 | 3196 | int netdev_notice(const struct net_device *dev, const char *format, ...); |
f629d208 | 3197 | __printf(2, 3) |
b9075fa9 | 3198 | int netdev_info(const struct net_device *dev, const char *format, ...); |
571ba423 | 3199 | |
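/*
 * Example usage (editorial): the netdev_* helpers prefix the message with
 * the driver and interface name, so they are preferred over bare printk()
 * whenever a net_device is at hand.  foo_report_fw() is a made-up name.
 */
static void foo_report_fw(struct net_device *dev, int major, int minor)
{
	if (major < 2)
		netdev_warn(dev, "old firmware %d.%d, please update\n",
			    major, minor);
	else
		netdev_info(dev, "firmware %d.%d\n", major, minor);
}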
8909c9ad VK |
3200 | #define MODULE_ALIAS_NETDEV(device) \ |
3201 | MODULE_ALIAS("netdev-" device) | |
3202 | ||
b558c96f | 3203 | #if defined(CONFIG_DYNAMIC_DEBUG) |
571ba423 JP |
3204 | #define netdev_dbg(__dev, format, args...) \ |
3205 | do { \ | |
ffa10cb4 | 3206 | dynamic_netdev_dbg(__dev, format, ##args); \ |
571ba423 | 3207 | } while (0) |
b558c96f JC |
3208 | #elif defined(DEBUG) |
3209 | #define netdev_dbg(__dev, format, args...) \ | |
3210 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | |
571ba423 JP |
3211 | #else |
3212 | #define netdev_dbg(__dev, format, args...) \ | |
3213 | ({ \ | |
3214 | if (0) \ | |
3215 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | |
3216 | 0; \ | |
3217 | }) | |
3218 | #endif | |
3219 | ||
3220 | #if defined(VERBOSE_DEBUG) | |
3221 | #define netdev_vdbg netdev_dbg | |
3222 | #else | |
3223 | ||
3224 | #define netdev_vdbg(dev, format, args...) \ | |
3225 | ({ \ | |
3226 | if (0) \ | |
3227 | netdev_printk(KERN_DEBUG, dev, format, ##args); \ | |
3228 | 0; \ | |
3229 | }) | |
3230 | #endif | |
3231 | ||
3232 | /* | |
3233 | * netdev_WARN() acts like dev_printk(), but with the key difference | |
3234 | * of using a WARN/WARN_ON to get the message out, including the | |
3235 | * file/line information and a backtrace. | |
3236 | */ | |
3237 | #define netdev_WARN(dev, format, args...) \ | |
7cc7c5e5 | 3238 | WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args) |
571ba423 | 3239 | |
b3d95c5c JP |
3240 | /* netif printk helpers, similar to netdev_printk */ |
3241 | ||
3242 | #define netif_printk(priv, type, level, dev, fmt, args...) \ | |
3243 | do { \ | |
3244 | if (netif_msg_##type(priv)) \ | |
3245 | netdev_printk(level, (dev), fmt, ##args); \ | |
3246 | } while (0) | |
3247 | ||
f45f4321 JP |
3248 | #define netif_level(level, priv, type, dev, fmt, args...) \ |
3249 | do { \ | |
3250 | if (netif_msg_##type(priv)) \ | |
3251 | netdev_##level(dev, fmt, ##args); \ | |
3252 | } while (0) | |
3253 | ||
b3d95c5c | 3254 | #define netif_emerg(priv, type, dev, fmt, args...) \ |
f45f4321 | 3255 | netif_level(emerg, priv, type, dev, fmt, ##args) |
b3d95c5c | 3256 | #define netif_alert(priv, type, dev, fmt, args...) \ |
f45f4321 | 3257 | netif_level(alert, priv, type, dev, fmt, ##args) |
b3d95c5c | 3258 | #define netif_crit(priv, type, dev, fmt, args...) \ |
f45f4321 | 3259 | netif_level(crit, priv, type, dev, fmt, ##args) |
b3d95c5c | 3260 | #define netif_err(priv, type, dev, fmt, args...) \ |
f45f4321 | 3261 | netif_level(err, priv, type, dev, fmt, ##args) |
b3d95c5c | 3262 | #define netif_warn(priv, type, dev, fmt, args...) \ |
f45f4321 | 3263 | netif_level(warn, priv, type, dev, fmt, ##args) |
b3d95c5c | 3264 | #define netif_notice(priv, type, dev, fmt, args...) \ |
f45f4321 | 3265 | netif_level(notice, priv, type, dev, fmt, ##args) |
b3d95c5c | 3266 | #define netif_info(priv, type, dev, fmt, args...) \ |
f45f4321 | 3267 | netif_level(info, priv, type, dev, fmt, ##args) |
b3d95c5c | 3268 | |
0053ea9c | 3269 | #if defined(CONFIG_DYNAMIC_DEBUG) |
b3d95c5c JP |
3270 | #define netif_dbg(priv, type, netdev, format, args...) \ |
3271 | do { \ | |
3272 | if (netif_msg_##type(priv)) \ | |
b5fb0a03 | 3273 | dynamic_netdev_dbg(netdev, format, ##args); \ |
b3d95c5c | 3274 | } while (0) |
0053ea9c JP |
3275 | #elif defined(DEBUG) |
3276 | #define netif_dbg(priv, type, dev, format, args...) \ | |
3277 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | |
b3d95c5c JP |
3278 | #else |
3279 | #define netif_dbg(priv, type, dev, format, args...) \ | |
3280 | ({ \ | |
3281 | if (0) \ | |
3282 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | |
3283 | 0; \ | |
3284 | }) | |
3285 | #endif | |
3286 | ||
3287 | #if defined(VERBOSE_DEBUG) | |
bcfcc450 | 3288 | #define netif_vdbg netif_dbg |
b3d95c5c JP |
3289 | #else |
3290 | #define netif_vdbg(priv, type, dev, format, args...) \ | |
3291 | ({ \ | |
3292 | if (0) \ | |
a4ed89cb | 3293 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ |
b3d95c5c JP |
3294 | 0; \ |
3295 | }) | |
3296 | #endif | |
571ba423 | 3297 | |
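/*
 * Example usage (editorial): the netif_* variants additionally gate the
 * message on the driver's msg_enable bitmap (see netif_msg_init() above),
 * so verbosity can be tuned per message class.  struct foo_priv (holding
 * msg_enable) and foo_tx_timeout() are hypothetical.
 */
static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_err(priv, tx_err, dev, "TX timeout, last TX %lu jiffies ago\n",
		  jiffies - dev_trans_start(dev));
	netif_dbg(priv, tx_err, dev, "resetting hardware\n");
}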
900ff8c6 CW |
3298 | /* |
3299 | * The list of packet types we will receive (as opposed to discard) | |
3300 | * and the routines to invoke. | |
3301 | * | |
3302 | * Why 16? Because with 16 the only overlap we get on a hash of the |
3303 | * low nibble of the protocol value is RARP/SNAP/X.25. | |
3304 | * | |
3305 | * NOTE: That is no longer true with the addition of VLAN tags. Not | |
3306 | * sure which should go first, but I bet it won't make much | |
3307 | * difference if we are running VLANs. The good news is that | |
3308 | * this protocol won't be in the list unless compiled in, so | |
3309 | * the average user (w/out VLANs) will not be adversely affected. | |
3310 | * --BLG | |
3311 | * | |
3312 | * 0800 IP | |
3313 | * 8100 802.1Q VLAN | |
3314 | * 0001 802.3 | |
3315 | * 0002 AX.25 | |
3316 | * 0004 802.2 | |
3317 | * 8035 RARP | |
3318 | * 0005 SNAP | |
3319 | * 0805 X.25 | |
3320 | * 0806 ARP | |
3321 | * 8137 IPX | |
3322 | * 0009 Localtalk | |
3323 | * 86DD IPv6 | |
3324 | */ | |
3325 | #define PTYPE_HASH_SIZE (16) | |
3326 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) | |
3327 | ||
385a154c | 3328 | #endif /* _LINUX_NETDEVICE_H */ |