ifb: remove unused macro TX_TIMEOUT
/* drivers/net/ifb.c:

        The purpose of this driver is to provide a device that allows
        for sharing of resources:

        1) qdiscs/policies that are per device as opposed to system wide.
        ifb allows for a device which can be redirected to thus providing
        an impression of sharing.

        2) Allows for queueing incoming traffic for shaping instead of
        dropping.

        The original concept is based on what is known as the IMQ
        driver initially written by Martin Devera, later rewritten
        by Patrick McHardy and then maintained by Andre Correa.

        You need the tc action mirror or redirect to feed this device
        packets.
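
        For example (an illustrative sketch only; the device names and
        qdisc handles are placeholders and tc syntax may differ between
        iproute2 versions):

            ip link set dev ifb0 up
            tc qdisc add dev eth0 ingress
            tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
                action mirred egress redirect dev ifb0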

        This program is free software; you can redistribute it and/or
        modify it under the terms of the GNU General Public License
        as published by the Free Software Foundation; either version
        2 of the License, or (at your option) any later version.

        Authors:        Jamal Hadi Salim (2005)

*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

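/* Each ifb device keeps two queues: ifb_xmit() appends redirected
 * packets to rq, and ri_tasklet() later moves them to tq before
 * re-injecting them into the stack.  TX_Q_LIMIT becomes the device's
 * tx_queue_len and bounds the rq backlog.
 */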
#define TX_Q_LIMIT    32
struct ifb_private {
        struct tasklet_struct   ifb_tasklet;
        int     tasklet_pending;
        struct sk_buff_head     rq;
        struct sk_buff_head     tq;
};

static int numifbs = 2;

static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

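/* ri_tasklet - drain the queues filled by ifb_xmit().  Packets are moved
 * from rq to tq under the tx queue lock and then re-injected: packets
 * grabbed at egress go back out via dev_queue_xmit(), packets grabbed at
 * ingress go back up the stack via netif_rx().
 */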
static void ri_tasklet(unsigned long dev)
{
        struct net_device *_dev = (struct net_device *)dev;
        struct ifb_private *dp = netdev_priv(_dev);
        struct net_device_stats *stats = &_dev->stats;
        struct netdev_queue *txq;
        struct sk_buff *skb;

        txq = netdev_get_tx_queue(_dev, 0);
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                if (__netif_tx_trylock(txq)) {
                        while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                skb_queue_tail(&dp->tq, skb);
                        }
                        __netif_tx_unlock(txq);
                } else {
                        /* reschedule */
                        goto resched;
                }
        }

        while ((skb = skb_dequeue(&dp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);

                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
                stats->tx_packets++;
                stats->tx_bytes += skb->len;

                rcu_read_lock();
                skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
                        stats->tx_dropped++;
                        if (skb_queue_len(&dp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
                skb->skb_iif = _dev->ifindex;

                if (from & AT_EGRESS) {
                        dev_queue_xmit(skb);
                } else if (from & AT_INGRESS) {
                        skb_pull(skb, skb->dev->hard_header_len);
                        netif_rx(skb);
                } else
                        BUG();
        }

        if (__netif_tx_trylock(txq)) {
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
                        if (netif_queue_stopped(_dev))
                                netif_wake_queue(_dev);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
                }
                __netif_tx_unlock(txq);
        } else {
resched:
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }
}

static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open       = ifb_open,
        .ndo_stop       = ifb_close,
        .ndo_start_xmit = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
};

static void ifb_setup(struct net_device *dev)
{
        /* Initialize the device structure. */
        dev->destructor = free_netdev;
        dev->netdev_ops = &ifb_netdev_ops;

        /* Fill in device structure with ethernet-generic values. */
        ether_setup(dev);
        dev->tx_queue_len = TX_Q_LIMIT;

        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        random_ether_addr(dev->dev_addr);
}

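/* ifb_xmit - entry point for packets redirected or mirrored to this
 * device (typically by the mirred tc action).  The skb is queued on rq
 * and the tasklet is scheduled; re-injection happens in ri_tasklet().
 */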
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        u32 from = G_TC_FROM(skb->tc_verd);

        stats->rx_packets++;
        stats->rx_bytes += skb->len;

        if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
                dev_kfree_skb(skb);
                stats->rx_dropped++;
                return NETDEV_TX_OK;
        }

        if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
                netif_stop_queue(dev);

        skb_queue_tail(&dp->rq, skb);
        if (!dp->tasklet_pending) {
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }

        return NETDEV_TX_OK;
}

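/* ifb_open()/ifb_close() set up and tear down the per-device tasklet
 * and the rq/tq packet queues when the interface is brought up or down.
 */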
static int ifb_close(struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);

        tasklet_kill(&dp->ifb_tasklet);
        netif_stop_queue(dev);
        skb_queue_purge(&dp->rq);
        skb_queue_purge(&dp->tq);
        return 0;
}

static int ifb_open(struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);

        tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
        skb_queue_head_init(&dp->rq);
        skb_queue_head_init(&dp->tq);
        netif_start_queue(dev);

        return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}

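/* Registering rtnl_link_ops lets additional devices be created at
 * runtime, e.g. "ip link add ifb3 type ifb" (illustrative command;
 * requires an iproute2 that knows the ifb link type).
 */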
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind           = "ifb",
        .priv_size      = sizeof(struct ifb_private),
        .setup          = ifb_setup,
        .validate       = ifb_validate,
};

/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
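/* The default of two devices can be overridden at load time, for
 * example (hypothetical invocation): "modprobe ifb numifbs=4".
 */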

static int __init ifb_init_one(int index)
{
        struct net_device *dev_ifb;
        int err;

        dev_ifb = alloc_netdev(sizeof(struct ifb_private),
                               "ifb%d", ifb_setup);

        if (!dev_ifb)
                return -ENOMEM;

        err = dev_alloc_name(dev_ifb, dev_ifb->name);
        if (err < 0)
                goto err;

        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
                goto err;

        return 0;

err:
        free_netdev(dev_ifb);
        return err;
}

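/* Link-ops registration and creation of the initial numifbs devices are
 * done under a single rtnl_lock(), since register_netdevice() requires
 * the rtnl lock to be held.
 */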
static int __init ifb_init_module(void)
{
        int i, err;

        rtnl_lock();
        err = __rtnl_link_register(&ifb_link_ops);

        for (i = 0; i < numifbs && !err; i++)
                err = ifb_init_one(i);
        if (err)
                __rtnl_link_unregister(&ifb_link_ops);
        rtnl_unlock();

        return err;
}

static void __exit ifb_cleanup_module(void)
{
        rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");