3 The purpose of this driver is to provide a device that allows
4 for sharing of resources:
6 1) qdiscs/policies that are per device as opposed to system wide.
7 ifb allows for a device which can be redirected to thus providing
8 an impression of sharing.
10	2) Allows for queueing incoming traffic for shaping instead of
	dropping.
13 The original concept is based on what is known as the IMQ
14 driver initially written by Martin Devera, later rewritten
15 by Patrick McHardy and then maintained by Andre Correa.
17	You need the tc action mirror or redirect to feed this device
	packets.
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 as published by the Free Software Foundation; either version
23 2 of the License, or (at your option) any later version.
25 Authors: Jamal Hadi Salim (2005)
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/init.h>
35 #include <linux/interrupt.h>
36 #include <linux/moduleparam.h>
37 #include <net/pkt_sched.h>
38 #include <net/net_namespace.h>
42 struct tasklet_struct ifb_tasklet
;
44 struct sk_buff_head rq
;
45 struct sk_buff_head tq
;
/* Default number of ifb devices created at module load; overridable
 * through the "numifbs" module parameter registered below. */
static int numifbs = 2;
/* Forward declarations for the handlers wired into ifb_netdev_ops and
 * into the tasklet set up by ifb_open(). */
static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
55 static void ri_tasklet(unsigned long dev
)
58 struct net_device
*_dev
= (struct net_device
*)dev
;
59 struct ifb_private
*dp
= netdev_priv(_dev
);
60 struct net_device_stats
*stats
= &_dev
->stats
;
61 struct netdev_queue
*txq
;
64 txq
= netdev_get_tx_queue(_dev
, 0);
65 if ((skb
= skb_peek(&dp
->tq
)) == NULL
) {
66 if (__netif_tx_trylock(txq
)) {
67 skb_queue_splice_tail_init(&dp
->rq
, &dp
->tq
);
68 __netif_tx_unlock(txq
);
75 while ((skb
= __skb_dequeue(&dp
->tq
)) != NULL
) {
76 u32 from
= G_TC_FROM(skb
->tc_verd
);
79 skb
->tc_verd
= SET_TC_NCLS(skb
->tc_verd
);
81 stats
->tx_bytes
+=skb
->len
;
84 skb
->dev
= dev_get_by_index_rcu(&init_net
, skb
->skb_iif
);
89 if (skb_queue_len(&dp
->tq
) != 0)
94 skb
->skb_iif
= _dev
->ifindex
;
96 if (from
& AT_EGRESS
) {
98 } else if (from
& AT_INGRESS
) {
99 skb_pull(skb
, skb
->dev
->hard_header_len
);
100 netif_receive_skb(skb
);
105 if (__netif_tx_trylock(txq
)) {
106 if ((skb
= skb_peek(&dp
->rq
)) == NULL
) {
107 dp
->tasklet_pending
= 0;
108 if (netif_queue_stopped(_dev
))
109 netif_wake_queue(_dev
);
111 __netif_tx_unlock(txq
);
114 __netif_tx_unlock(txq
);
117 dp
->tasklet_pending
= 1;
118 tasklet_schedule(&dp
->ifb_tasklet
);
123 static const struct net_device_ops ifb_netdev_ops
= {
124 .ndo_open
= ifb_open
,
125 .ndo_stop
= ifb_close
,
126 .ndo_start_xmit
= ifb_xmit
,
127 .ndo_validate_addr
= eth_validate_addr
,
/* Offload/feature flags advertised by ifb devices; ifb_setup() also
 * mirrors these into dev->vlan_features. */
#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
134 static void ifb_setup(struct net_device
*dev
)
136 /* Initialize the device structure. */
137 dev
->destructor
= free_netdev
;
138 dev
->netdev_ops
= &ifb_netdev_ops
;
140 /* Fill in device structure with ethernet-generic values. */
142 dev
->tx_queue_len
= TX_Q_LIMIT
;
144 dev
->features
|= IFB_FEATURES
;
145 dev
->vlan_features
|= IFB_FEATURES
;
147 dev
->flags
|= IFF_NOARP
;
148 dev
->flags
&= ~IFF_MULTICAST
;
149 dev
->priv_flags
&= ~IFF_XMIT_DST_RELEASE
;
150 random_ether_addr(dev
->dev_addr
);
153 static netdev_tx_t
ifb_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
155 struct ifb_private
*dp
= netdev_priv(dev
);
156 struct net_device_stats
*stats
= &dev
->stats
;
157 u32 from
= G_TC_FROM(skb
->tc_verd
);
160 stats
->rx_bytes
+=skb
->len
;
162 if (!(from
& (AT_INGRESS
|AT_EGRESS
)) || !skb
->skb_iif
) {
168 if (skb_queue_len(&dp
->rq
) >= dev
->tx_queue_len
) {
169 netif_stop_queue(dev
);
172 __skb_queue_tail(&dp
->rq
, skb
);
173 if (!dp
->tasklet_pending
) {
174 dp
->tasklet_pending
= 1;
175 tasklet_schedule(&dp
->ifb_tasklet
);
181 static int ifb_close(struct net_device
*dev
)
183 struct ifb_private
*dp
= netdev_priv(dev
);
185 tasklet_kill(&dp
->ifb_tasklet
);
186 netif_stop_queue(dev
);
187 __skb_queue_purge(&dp
->rq
);
188 __skb_queue_purge(&dp
->tq
);
192 static int ifb_open(struct net_device
*dev
)
194 struct ifb_private
*dp
= netdev_priv(dev
);
196 tasklet_init(&dp
->ifb_tasklet
, ri_tasklet
, (unsigned long)dev
);
197 __skb_queue_head_init(&dp
->rq
);
198 __skb_queue_head_init(&dp
->tq
);
199 netif_start_queue(dev
);
204 static int ifb_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
206 if (tb
[IFLA_ADDRESS
]) {
207 if (nla_len(tb
[IFLA_ADDRESS
]) != ETH_ALEN
)
209 if (!is_valid_ether_addr(nla_data(tb
[IFLA_ADDRESS
])))
210 return -EADDRNOTAVAIL
;
215 static struct rtnl_link_ops ifb_link_ops __read_mostly
= {
217 .priv_size
= sizeof(struct ifb_private
),
219 .validate
= ifb_validate
,
/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
226 static int __init
ifb_init_one(int index
)
228 struct net_device
*dev_ifb
;
231 dev_ifb
= alloc_netdev(sizeof(struct ifb_private
),
237 dev_ifb
->rtnl_link_ops
= &ifb_link_ops
;
238 err
= register_netdevice(dev_ifb
);
245 free_netdev(dev_ifb
);
249 static int __init
ifb_init_module(void)
254 err
= __rtnl_link_register(&ifb_link_ops
);
256 for (i
= 0; i
< numifbs
&& !err
; i
++)
257 err
= ifb_init_one(i
);
259 __rtnl_link_unregister(&ifb_link_ops
);
265 static void __exit
ifb_cleanup_module(void)
267 rtnl_link_unregister(&ifb_link_ops
);
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
/* rtnl link alias so the module can be resolved for link type "ifb" */
MODULE_ALIAS_RTNL_LINK("ifb");