[NET] ifb: set separate lockdep classes for queue locks
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
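
/*
 * A minimal usage sketch: the commands below assume a physical device
 * eth0 and the first ifb device ifb0; the netem delay and the u32 match
 * are placeholder parameters, not dictated by this driver. Traffic
 * arriving on eth0 is redirected to ifb0 by the mirred action and shaped
 * by whatever qdisc is attached to ifb0:
 *
 *	ip link set ifb0 up
 *	tc qdisc add dev ifb0 root netem delay 50ms
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip prio 10 u32 \
 *		match u32 0 0 flowid 1:1 action mirred egress redirect dev ifb0
 */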

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <linux/lockdep.h>

#define TX_TIMEOUT  (2*HZ)

#define TX_Q_LIMIT    32
struct ifb_private {
	struct tasklet_struct   ifb_tasklet;
	int     tasklet_pending;
	/* mostly debug stats, leave in for now */
	unsigned long   st_task_enter; /* tasklet entered */
	unsigned long   st_txq_refl_try; /* transmit queue refill attempt */
	unsigned long   st_rxq_enter; /* receive queue entered */
	unsigned long   st_rx2tx_tran; /* receive to transmit transfers */
	unsigned long   st_rxq_notenter; /* receive queue not entered, resched */
	unsigned long   st_rx_frm_egr; /* received from egress path */
	unsigned long   st_rx_frm_ing; /* received from ingress path */
	unsigned long   st_rxq_check;
	unsigned long   st_rxq_rsch;
	struct sk_buff_head     rq;
	struct sk_buff_head     tq;
};

static int numifbs = 2;

static void ri_tasklet(unsigned long dev);
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

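/*
 * Bottom half that services an ifb device: packets queued by ifb_xmit()
 * on dp->rq are moved to dp->tq under the device tx lock, then each one
 * is handed back toward its original device: dev_queue_xmit() for packets
 * redirected from the egress path, netif_rx() for packets redirected from
 * the ingress path.
 */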
static void ri_tasklet(unsigned long dev)
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct sk_buff *skb;

	dp->st_task_enter++;
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (netif_tx_trylock(_dev)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			netif_tx_unlock(_dev);
		} else {
			/* reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		skb->dev = __dev_get_by_index(&init_net, skb->iif);
		if (!skb->dev) {
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			break;
		}
		skb->iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			skb_pull(skb, skb->dev->hard_header_len);
			netif_rx(skb);
		} else
			BUG();
	}

	if (netif_tx_trylock(_dev)) {
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			netif_tx_unlock(_dev);
			goto resched;
		}
		netif_tx_unlock(_dev);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->hard_start_xmit = ifb_xmit;
	dev->open = &ifb_open;
	dev->stop = &ifb_close;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;
	dev->change_mtu = NULL;
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	random_ether_addr(dev->dev_addr);
}

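/*
 * Transmit handler: packets redirected to the ifb device land here.
 * Valid packets (tagged by the mirred action with an ingress or egress
 * verdict and carrying the originating ifindex in skb->iif) are queued
 * on dp->rq and the tasklet is scheduled to process them.
 */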
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	int ret = 0;
	u32 from = G_TC_FROM(skb->tc_verd);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) {
		dev_kfree_skb(skb);
		stats->rx_dropped++;
		return ret;
	}

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return ret;
}

static int ifb_close(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_kill(&dp->ifb_tasklet);
	netif_stop_queue(dev);
	skb_queue_purge(&dp->rq);
	skb_queue_purge(&dp->tq);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	skb_queue_head_init(&dp->rq);
	skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

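/*
 * Usage sketch: numifbs is a load-time parameter, so (assuming the driver
 * is built as a module) something like
 *	modprobe ifb numifbs=4
 * creates ifb0 through ifb3 instead of the default two devices.
 */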
/*
 * dev_ifb->queue_lock is usually taken after dev->ingress_lock,
 * which is the reverse of the order used in e.g. qdisc_lock_tree().
 * This is safe as long as ifb never takes dev->queue_lock while
 * holding dev_ifb->ingress_lock. Lockdep, however, needs to know
 * that ifb's locks belong to different classes than dev's.
 */
static struct lock_class_key ifb_queue_lock_key;
static struct lock_class_key ifb_ingress_lock_key;

static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
				 "ifb%d", ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	err = dev_alloc_name(dev_ifb, dev_ifb->name);
	if (err < 0)
		goto err;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

static int __init ifb_init_module(void)
{
	int i, err;

	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);

	for (i = 0; i < numifbs && !err; i++)
		err = ifb_init_one(i);
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);
	rtnl_unlock();

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");