/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define VNET_MAX_RETRIES	10

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);

/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

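/* Send our ATTR_INFO packet as part of the VIO handshake: dring transfer
 * mode, Ethernet MAC addressing, the local MAC packed into a u64, and an
 * MTU of ETH_FRAME_LEN (this version of the protocol has no jumbo-frame
 * support).
 */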
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}

static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

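/* Send a DATA/ACK for descriptors [start, end] on the given dring.
 * vio_ldc_send() is retried with exponential backoff (capped at 128us and
 * VNET_MAX_RETRIES attempts) while it keeps returning -EAGAIN.  If a
 * VIO_DRING_STOPPED ack cannot be sent, remember the index in stop_rx_idx
 * so that __vnet_tx_trigger() can resend it before the next "start".
 */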
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= end,
		.state			= vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

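/* Ring index helpers: advance or step back one slot, wrapping around
 * dr->num_entries.
 */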
static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

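/* Process the READY descriptors from 'start' up to (but not including)
 * 'end', sending an ACTIVE ack whenever a descriptor requests one and a
 * final STOPPED ack for the last range processed.  An end index of -1
 * means "walk the whole ring".
 */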
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}

static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}

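/* Return 1 if index 'end' lies within the not-yet-acked region of the
 * dring, i.e. between dr->cons and dr->prod.
 */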
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;
	struct vio_net_desc *desc;
	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	dr->cons = next_idx(end, dr);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

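/* Tasklet handler scheduled from vnet_event(): re-check every port's TX
 * dring and wake the stopped netdev queue once all of them have room
 * again.  Runs under netif_tx_lock() so it cannot race with the xmit path.
 */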
static void maybe_tx_wakeup(unsigned long param)
{
	struct vnet *vp = (struct vnet *)param;
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}

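/* LDC event callback for a port.  Link RESET/UP events are forwarded to
 * the generic VIO state machine; DATA_READY drains the LDC channel and
 * dispatches each message to the rx/ack/mcast/control handlers.
 */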
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn("Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	/* Kick off a tasklet to wake the queue.  We cannot call
	 * maybe_tx_wakeup directly here because we could deadlock on
	 * netif_tx_lock() with dev_watchdog()
	 */
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		tasklet_schedule(&port->vp->vnet_tx_wakeup);

	local_irq_restore(flags);
}

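/* Send a DATA/INFO "start" trigger telling the peer that descriptors are
 * READY beginning at 'start'.  A pending STOPPED ack (see vnet_send_ack())
 * is flushed first, and the send itself is retried with the same bounded
 * exponential backoff used elsewhere in this driver.
 */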
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= (u32) -1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	return err;
}

static inline bool port_is_up(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}

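/* Pick the destination port for a packet: first try the hash bucket that
 * matches the destination MAC, then fall back to the first switch port
 * that has completed its handshake.  Caller holds vp->lock.
 */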
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct vnet_port *port;

	hlist_for_each_entry(port, hp, hash) {
		if (!port_is_up(port))
			continue;
		if (ether_addr_equal(port->raddr, skb->data))
			return port;
	}
	list_for_each_entry(port, &vp->port_list, list) {
		if (!port->switch_port)
			continue;
		if (!port_is_up(port))
			continue;
		return port;
	}
	return NULL;
}

struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

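/* Transmit path: copy the linear skb data into the pre-mapped LDC TX
 * buffer for the next dring slot, mark the descriptor READY, and send a
 * "start" trigger only when one is actually needed (see the comment on
 * start_cons below).  The queue is stopped when fewer than two slots
 * remain.
 */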
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons)
		goto ldc_start_done; /* previous trigger suffices */

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

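/* Mirror the netdev multicast list into vp->mcast_list.  __update_mc_list()
 * adds new addresses and marks present ones as 'hit'; __send_mc_list() then
 * pushes unsent additions to the switch port in VNET_NUM_MCAST-sized
 * batches and removes (and announces removal of) entries that were not hit.
 */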
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}

static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};

static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}

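/* Allocate and LDC-map one 8-byte-aligned TX buffer per ring slot, then
 * export the TX descriptor ring itself to the peer domain.  On any failure
 * everything allocated so far is torn down via vnet_port_free_tx_bufs().
 */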
static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf)
			goto err_out;

		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			pr_err("TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}

static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_rx_mode	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
};

static struct vnet *vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		pr_err("Cannot register net device, aborting\n");
		goto err_out_free_dev;
	}

	netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}

static struct vnet *vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}

static void vnet_cleanup(void)
{
	struct vnet *vp;
	struct net_device *dev;

	mutex_lock(&vnet_list_mutex);
	while (!list_empty(&vnet_list)) {
		vp = list_first_entry(&vnet_list, struct vnet, list);
		list_del(&vp->list);
		dev = vp->dev;
		tasklet_kill(&vp->vnet_tx_wakeup);
		/* vio_unregister_driver() should have cleaned up port_list */
		BUG_ON(!list_empty(&vp->port_list));
		unregister_netdev(dev);
		free_netdev(dev);
	}
	mutex_unlock(&vnet_list_mutex);
}

static const char *local_mac_prop = "local-mac-address";

static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
				     u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}

static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void print_version(void)
{
	printk_once(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";

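/* Probe one "vnet-port" MD node: look up (or create) the parent vnet device
 * by its local MAC, read the remote MAC, set up the VIO driver state, LDC
 * channel and TX buffers, hook the port into the parent's lists, and kick
 * off the handshake with vio_port_up().
 */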
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		pr_err("Cannot find port parent vnet\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		pr_err("Port lacks %s property\n", remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port)
		goto err_out_put_mdesc;

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	pr_info("%s: PORT ( remote-mac %pM%s )\n",
		vp->dev->name, port->raddr, switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}

static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.name		= "vnet_port",
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
	vnet_cleanup();
}

module_init(vnet_init);
module_exit(vnet_exit);