Commit | Line | Data |
---|---|---|
f89efd52 MP |
1 | /* |
2 | * rionet - Ethernet driver over RapidIO messaging services | |
3 | * | |
4 | * Copyright 2005 MontaVista Software, Inc. | |
5 | * Matt Porter <mporter@kernel.crashing.org> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms of the GNU General Public License as published by the | |
9 | * Free Software Foundation; either version 2 of the License, or (at your | |
10 | * option) any later version. | |
11 | */ | |
12 | ||
13 | #include <linux/module.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | #include <linux/delay.h> | |
17 | #include <linux/rio.h> | |
18 | #include <linux/rio_drv.h> | |
5a0e3ad6 | 19 | #include <linux/slab.h> |
f89efd52 MP |
20 | #include <linux/rio_ids.h> |
21 | ||
22 | #include <linux/netdevice.h> | |
23 | #include <linux/etherdevice.h> | |
24 | #include <linux/skbuff.h> | |
25 | #include <linux/crc32.h> | |
26 | #include <linux/ethtool.h> | |
f41e2472 | 27 | #include <linux/reboot.h> |
f89efd52 MP |
28 | |
29 | #define DRV_NAME "rionet" | |
2fb717ec | 30 | #define DRV_VERSION "0.3" |
f89efd52 MP |
31 | #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>" |
32 | #define DRV_DESC "Ethernet over RapidIO" | |
33 | ||
34 | MODULE_AUTHOR(DRV_AUTHOR); | |
35 | MODULE_DESCRIPTION(DRV_DESC); | |
36 | MODULE_LICENSE("GPL"); | |
37 | ||
38 | #define RIONET_DEFAULT_MSGLEVEL \ | |
39 | (NETIF_MSG_DRV | \ | |
40 | NETIF_MSG_LINK | \ | |
41 | NETIF_MSG_RX_ERR | \ | |
42 | NETIF_MSG_TX_ERR) | |
43 | ||
44 | #define RIONET_DOORBELL_JOIN 0x1000 | |
45 | #define RIONET_DOORBELL_LEAVE 0x1001 | |
46 | ||
47 | #define RIONET_MAILBOX 0 | |
48 | ||
49 | #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE | |
50 | #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE | |
2fb717ec | 51 | #define RIONET_MAX_NETS 8 |
92444bb3 AJ |
52 | #define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE |
53 | #define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN) | |
f89efd52 MP |
54 | |
/* Per-netdevice driver state, stored in netdev_priv(). */
struct rionet_private {
	struct rio_mport *mport;	/* local RapidIO master port */
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];	/* posted RX buffers */
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];	/* in-flight TX skbs */
	int rx_slot;	/* next RX ring slot to refill */
	int tx_slot;	/* next free TX ring slot */
	int tx_cnt;	/* number of TX messages in flight */
	int ack_slot;	/* oldest not-yet-completed TX slot */
	spinlock_t lock;	/* protects the RX ring */
	spinlock_t tx_lock;	/* protects the TX ring */
	u32 msg_enable;	/* NETIF_MSG_* logging bitmap */
	bool open;	/* set while the interface is up */
};

/* One remote RapidIO endpoint that may take part in the network. */
struct rionet_peer {
	struct list_head node;	/* link in rionet_net.peers */
	struct rio_dev *rdev;	/* the remote device itself */
	struct resource *res;	/* reserved outbound doorbell range */
};

/* Per-mport network state; indexed by mport id in nets[]. */
struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;	/* destid -> active peer device (or NULL) */
	int nact;	/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

/*
 * A device can participate only if it supports both data messaging and
 * doorbells in its source and destination operation CARs.
 */
#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

/*
 * rionet MAC addresses have the form 00:01:00:01:<destid hi>:<destid lo>;
 * these helpers recognize such an address and extract the 16-bit destid.
 */
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
f89efd52 | 95 | |
f89efd52 MP |
/*
 * Drain the inbound mailbox: attach each completed receive buffer to its
 * posted skb, push it up the network stack and update RX statistics.
 *
 * Returns the ring index one past the last slot processed; the caller
 * uses it as the refill boundary for rionet_rx_fill(). Called with
 * rnet->lock held (see rionet_inb_msg_event()).
 */
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		/* No skb posted in this slot - nothing to complete. */
		if (!rnet->rx_skb[i])
			continue;

		/* No more completed inbound messages pending. */
		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		/*
		 * Point the skb at the buffer the mailbox filled. The
		 * actual payload length is not reported by the mailbox,
		 * so RIO_MAX_MSG_SIZE is used for both skb_put() and the
		 * rx_bytes counter.
		 */
		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}
129 | ||
/*
 * Repost fresh receive skbs to the inbound mailbox for ring slots
 * [rnet->rx_slot, end), wrapping modulo RIONET_RX_RING_SIZE.
 * Stops early if an skb allocation fails; rx_slot is left pointing at
 * the first slot that was not refilled.
 */
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}
148 | ||
/*
 * Queue one skb to remote device @rdev via the outbound mailbox and
 * update TX statistics. Caller must hold rnet->tx_lock.
 *
 * Stops the netif queue once the TX ring is full; it is woken again
 * from rionet_outb_msg_event() as completions are reaped. Always
 * returns 0.
 */
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	/* Remember the skb so the completion handler can free it. */
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	/* Ring size is a power of two, so masking wraps the slot index. */
	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}
172 | ||
173 | static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |
174 | { | |
175 | int i; | |
4cf1653a | 176 | struct rionet_private *rnet = netdev_priv(ndev); |
f89efd52 MP |
177 | struct ethhdr *eth = (struct ethhdr *)skb->data; |
178 | u16 destid; | |
179 | unsigned long flags; | |
7c4a6106 | 180 | int add_num = 1; |
f89efd52 MP |
181 | |
182 | local_irq_save(flags); | |
183 | if (!spin_trylock(&rnet->tx_lock)) { | |
184 | local_irq_restore(flags); | |
185 | return NETDEV_TX_LOCKED; | |
186 | } | |
187 | ||
7c4a6106 | 188 | if (is_multicast_ether_addr(eth->h_dest)) |
2fb717ec | 189 | add_num = nets[rnet->mport->id].nact; |
7c4a6106 AB |
190 | |
191 | if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) { | |
f89efd52 MP |
192 | netif_stop_queue(ndev); |
193 | spin_unlock_irqrestore(&rnet->tx_lock, flags); | |
194 | printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", | |
195 | ndev->name); | |
196 | return NETDEV_TX_BUSY; | |
197 | } | |
198 | ||
abfc89c7 | 199 | if (is_multicast_ether_addr(eth->h_dest)) { |
7c4a6106 | 200 | int count = 0; |
2fb717ec | 201 | |
e0423236 ZW |
202 | for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); |
203 | i++) | |
2fb717ec | 204 | if (nets[rnet->mport->id].active[i]) { |
f89efd52 | 205 | rionet_queue_tx_msg(skb, ndev, |
2fb717ec | 206 | nets[rnet->mport->id].active[i]); |
7c4a6106 AB |
207 | if (count) |
208 | atomic_inc(&skb->users); | |
209 | count++; | |
210 | } | |
f89efd52 MP |
211 | } else if (RIONET_MAC_MATCH(eth->h_dest)) { |
212 | destid = RIONET_GET_DESTID(eth->h_dest); | |
2fb717ec AB |
213 | if (nets[rnet->mport->id].active[destid]) |
214 | rionet_queue_tx_msg(skb, ndev, | |
215 | nets[rnet->mport->id].active[destid]); | |
e6161d64 AB |
216 | else { |
217 | /* | |
218 | * If the target device was removed from the list of | |
219 | * active peers but we still have TX packets targeting | |
220 | * it just report sending a packet to the target | |
221 | * (without actual packet transfer). | |
222 | */ | |
223 | dev_kfree_skb_any(skb); | |
224 | ndev->stats.tx_packets++; | |
225 | ndev->stats.tx_bytes += skb->len; | |
226 | } | |
f89efd52 MP |
227 | } |
228 | ||
229 | spin_unlock_irqrestore(&rnet->tx_lock, flags); | |
230 | ||
6ed10654 | 231 | return NETDEV_TX_OK; |
f89efd52 MP |
232 | } |
233 | ||
/*
 * Inbound doorbell handler (registered in rionet_open()).
 *
 * JOIN: mark the sending peer active and answer with our own JOIN so
 * both sides learn about each other. LEAVE: drop the peer from the
 * active table. Anything else is logged (when enabled) and ignored.
 */
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		/* Unlocked pre-check; the list walk below is under the lock. */
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			/* Acknowledge so the sender marks us active too. */
			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}
272 | ||
/*
 * Inbound mailbox callback: reap completed RX messages and repost
 * buffers for the slots that were drained. RX ring state is protected
 * by rnet->lock.
 */
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}
288 | ||
/*
 * Outbound mailbox callback: free the skbs of all TX slots the
 * hardware has completed (from ack_slot up to, but excluding, @slot)
 * and wake the queue once ring space is available again.
 */
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		/* Power-of-two ring: mask performs the wrap. */
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}
315 | ||
316 | static int rionet_open(struct net_device *ndev) | |
317 | { | |
318 | int i, rc = 0; | |
34ed2ebb | 319 | struct rionet_peer *peer; |
4cf1653a | 320 | struct rionet_private *rnet = netdev_priv(ndev); |
34ed2ebb AB |
321 | unsigned char netid = rnet->mport->id; |
322 | unsigned long flags; | |
f89efd52 MP |
323 | |
324 | if (netif_msg_ifup(rnet)) | |
325 | printk(KERN_INFO "%s: open\n", DRV_NAME); | |
326 | ||
327 | if ((rc = rio_request_inb_dbell(rnet->mport, | |
328 | (void *)ndev, | |
329 | RIONET_DOORBELL_JOIN, | |
330 | RIONET_DOORBELL_LEAVE, | |
331 | rionet_dbell_event)) < 0) | |
332 | goto out; | |
333 | ||
334 | if ((rc = rio_request_inb_mbox(rnet->mport, | |
335 | (void *)ndev, | |
336 | RIONET_MAILBOX, | |
337 | RIONET_RX_RING_SIZE, | |
338 | rionet_inb_msg_event)) < 0) | |
339 | goto out; | |
340 | ||
341 | if ((rc = rio_request_outb_mbox(rnet->mport, | |
342 | (void *)ndev, | |
343 | RIONET_MAILBOX, | |
344 | RIONET_TX_RING_SIZE, | |
345 | rionet_outb_msg_event)) < 0) | |
346 | goto out; | |
347 | ||
348 | /* Initialize inbound message ring */ | |
349 | for (i = 0; i < RIONET_RX_RING_SIZE; i++) | |
350 | rnet->rx_skb[i] = NULL; | |
351 | rnet->rx_slot = 0; | |
352 | rionet_rx_fill(ndev, 0); | |
353 | ||
354 | rnet->tx_slot = 0; | |
355 | rnet->tx_cnt = 0; | |
356 | rnet->ack_slot = 0; | |
357 | ||
358 | netif_carrier_on(ndev); | |
359 | netif_start_queue(ndev); | |
360 | ||
34ed2ebb AB |
361 | spin_lock_irqsave(&nets[netid].lock, flags); |
362 | list_for_each_entry(peer, &nets[netid].peers, node) { | |
284fb68d AB |
363 | /* Send a join message */ |
364 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); | |
f89efd52 | 365 | } |
34ed2ebb AB |
366 | spin_unlock_irqrestore(&nets[netid].lock, flags); |
367 | rnet->open = true; | |
f89efd52 MP |
368 | |
369 | out: | |
370 | return rc; | |
371 | } | |
372 | ||
373 | static int rionet_close(struct net_device *ndev) | |
374 | { | |
4cf1653a | 375 | struct rionet_private *rnet = netdev_priv(ndev); |
34ed2ebb AB |
376 | struct rionet_peer *peer; |
377 | unsigned char netid = rnet->mport->id; | |
378 | unsigned long flags; | |
f89efd52 MP |
379 | int i; |
380 | ||
381 | if (netif_msg_ifup(rnet)) | |
2fb717ec | 382 | printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name); |
f89efd52 MP |
383 | |
384 | netif_stop_queue(ndev); | |
385 | netif_carrier_off(ndev); | |
34ed2ebb | 386 | rnet->open = false; |
f89efd52 MP |
387 | |
388 | for (i = 0; i < RIONET_RX_RING_SIZE; i++) | |
aaff1e19 | 389 | kfree_skb(rnet->rx_skb[i]); |
f89efd52 | 390 | |
34ed2ebb AB |
391 | spin_lock_irqsave(&nets[netid].lock, flags); |
392 | list_for_each_entry(peer, &nets[netid].peers, node) { | |
393 | if (nets[netid].active[peer->rdev->destid]) { | |
f89efd52 | 394 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); |
34ed2ebb | 395 | nets[netid].active[peer->rdev->destid] = NULL; |
f89efd52 | 396 | } |
34ed2ebb AB |
397 | if (peer->res) |
398 | rio_release_outb_dbell(peer->rdev, peer->res); | |
f89efd52 | 399 | } |
34ed2ebb | 400 | spin_unlock_irqrestore(&nets[netid].lock, flags); |
f89efd52 MP |
401 | |
402 | rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, | |
403 | RIONET_DOORBELL_LEAVE); | |
404 | rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX); | |
405 | rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX); | |
406 | ||
407 | return 0; | |
408 | } | |
409 | ||
/*
 * Subsystem remove_dev callback: detach @rdev from its net's peer
 * list. If the peer was active, notify it with a LEAVE doorbell
 * (skipped when the device is already gone or reinitializing) and drop
 * it from the active table. The doorbell resource is released and the
 * peer struct freed outside the spinlock.
 */
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	/* Devices that never joined were never added as peers. */
	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	/* peer is still valid here: it was unlinked, not freed, above. */
	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}
447 | ||
448 | static void rionet_get_drvinfo(struct net_device *ndev, | |
449 | struct ethtool_drvinfo *info) | |
450 | { | |
4cf1653a | 451 | struct rionet_private *rnet = netdev_priv(ndev); |
f89efd52 | 452 | |
7826d43f JP |
453 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); |
454 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); | |
455 | strlcpy(info->fw_version, "n/a", sizeof(info->fw_version)); | |
456 | strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info)); | |
f89efd52 MP |
457 | } |
458 | ||
459 | static u32 rionet_get_msglevel(struct net_device *ndev) | |
460 | { | |
4cf1653a | 461 | struct rionet_private *rnet = netdev_priv(ndev); |
f89efd52 MP |
462 | |
463 | return rnet->msg_enable; | |
464 | } | |
465 | ||
466 | static void rionet_set_msglevel(struct net_device *ndev, u32 value) | |
467 | { | |
4cf1653a | 468 | struct rionet_private *rnet = netdev_priv(ndev); |
f89efd52 MP |
469 | |
470 | rnet->msg_enable = value; | |
471 | } | |
472 | ||
92444bb3 AJ |
473 | static int rionet_change_mtu(struct net_device *ndev, int new_mtu) |
474 | { | |
475 | if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) { | |
476 | printk(KERN_ERR "%s: Invalid MTU size %d\n", | |
477 | ndev->name, new_mtu); | |
478 | return -EINVAL; | |
479 | } | |
480 | ndev->mtu = new_mtu; | |
481 | return 0; | |
482 | } | |
483 | ||
/* ethtool callbacks: identification, msglevel get/set and link state. */
static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

/* Standard net_device callbacks for the rionet interface. */
static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open = rionet_open,
	.ndo_stop = rionet_close,
	.ndo_start_xmit = rionet_start_xmit,
	.ndo_change_mtu = rionet_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
499 | ||
/*
 * One-time setup of the rionet net_device for master port @mport:
 * allocates the destid -> rio_dev "active" table, programs the default
 * MAC address (00:01:00:01:<local destid>), installs the netdev and
 * ethtool ops and registers the device.
 *
 * Returns 0 on success or a negative errno. On register failure the
 * active table is freed again; freeing @ndev itself is the caller's
 * responsibility (see rionet_add_dev()).
 */
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	/* Table size depends on sys_size, so it is page-allocated. */
	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	/* NETIF_F_LLTX: the driver serializes TX itself (rnet->tx_lock). */
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr,
	       mport->name);

out:
	return rc;
}
559 | ||
/*
 * Subsystem add_dev callback, invoked for every RapidIO device found
 * on the bus.
 *
 * The first call for a given net validates that the local mport itself
 * is messaging/doorbell capable and creates the rionet net_device.
 * Each capable remote device is then added to the per-net peer list
 * with its outbound doorbell range reserved; if the interface is
 * already up, a JOIN doorbell is sent right away so the new peer can
 * become active without a reopen.
 */
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		/* Reserve the JOIN/LEAVE doorbell range towards this peer. */
		peer->res = rio_request_outb_dbell(peer->rdev,
						RIONET_DOORBELL_JOIN,
						RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}
651 | ||
f41e2472 AB |
/*
 * Reboot notifier: on shutdown/reboot, send a LEAVE doorbell to every
 * still-active peer on every net so remote nodes stop targeting this
 * endpoint, and clear the active tables.
 */
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		/* Skip nets that never got a netdev. */
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}
678 | ||
b7dfca8b AB |
/*
 * Class interface remove callback for a local mport going away:
 * unregister and free the associated net_device and release the
 * active-peer table. The WARNs flag teardown-ordering problems
 * (peers still active, or no netdev for this mport).
 */
static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		/* Mirror of the allocation in rionet_setup_netdev(). */
		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
			   RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}
706 | ||
/* Match any RapidIO device; filtering happens in rionet_add_dev(). */
#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

/* Hooks device add/remove events on the RapidIO bus. */
static struct subsys_interface rionet_interface = {
	.name = "rionet",
	.subsys = &rio_bus_type,
	.add_dev = rionet_add_dev,
	.remove_dev = rionet_remove_dev,
};

/* Sends LEAVE doorbells to peers on reboot/shutdown. */
static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};
733 | ||
f89efd52 MP |
734 | static int __init rionet_init(void) |
735 | { | |
f41e2472 AB |
736 | int ret; |
737 | ||
738 | ret = register_reboot_notifier(&rionet_notifier); | |
739 | if (ret) { | |
740 | pr_err("%s: failed to register reboot notifier (err=%d)\n", | |
741 | DRV_NAME, ret); | |
742 | return ret; | |
743 | } | |
b7dfca8b AB |
744 | |
745 | ret = class_interface_register(&rio_mport_interface); | |
746 | if (ret) { | |
747 | pr_err("%s: class_interface_register error: %d\n", | |
748 | DRV_NAME, ret); | |
749 | return ret; | |
750 | } | |
751 | ||
e6161d64 | 752 | return subsys_interface_register(&rionet_interface); |
f89efd52 MP |
753 | } |
754 | ||
/*
 * Module exit: unhook the reboot notifier, then detach from the bus
 * (which triggers rionet_remove_dev() per device) and finally from the
 * mport class (which triggers rionet_remove_mport() per mport).
 */
static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}
761 | ||
2f809985 | 762 | late_initcall(rionet_init); |
f89efd52 | 763 | module_exit(rionet_exit); |