/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>

#include <asm/vio.h>
#include <asm/ldc.h>
#define DRV_MODULE_NAME		"sunvnet"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"
26 static char version
[] __devinitdata
=
27 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
28 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
29 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION
);
33 /* Ordered from largest major to lowest */
34 static struct vio_version vnet_versions
[] = {
35 { .major
= 1, .minor
= 0 },
38 static inline u32
vnet_tx_dring_avail(struct vio_dring_state
*dr
)
40 return vio_dring_avail(dr
, VNET_TX_RING_SIZE
);
43 static int vnet_handle_unknown(struct vnet_port
*port
, void *arg
)
45 struct vio_msg_tag
*pkt
= arg
;
47 printk(KERN_ERR PFX
"Received unknown msg [%02x:%02x:%04x:%08x]\n",
48 pkt
->type
, pkt
->stype
, pkt
->stype_env
, pkt
->sid
);
49 printk(KERN_ERR PFX
"Resetting connection.\n");
51 ldc_disconnect(port
->vio
.lp
);
56 static int vnet_send_attr(struct vio_driver_state
*vio
)
58 struct vnet_port
*port
= to_vnet_port(vio
);
59 struct net_device
*dev
= port
->vp
->dev
;
60 struct vio_net_attr_info pkt
;
63 memset(&pkt
, 0, sizeof(pkt
));
64 pkt
.tag
.type
= VIO_TYPE_CTRL
;
65 pkt
.tag
.stype
= VIO_SUBTYPE_INFO
;
66 pkt
.tag
.stype_env
= VIO_ATTR_INFO
;
67 pkt
.tag
.sid
= vio_send_sid(vio
);
68 pkt
.xfer_mode
= VIO_DRING_MODE
;
69 pkt
.addr_type
= VNET_ADDR_ETHERMAC
;
71 for (i
= 0; i
< 6; i
++)
72 pkt
.addr
|= (u64
)dev
->dev_addr
[i
] << ((5 - i
) * 8);
73 pkt
.mtu
= ETH_FRAME_LEN
;
75 viodbg(HS
, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
76 "ackfreq[%u] mtu[%llu]\n",
77 pkt
.xfer_mode
, pkt
.addr_type
,
78 (unsigned long long) pkt
.addr
,
80 (unsigned long long) pkt
.mtu
);
82 return vio_ldc_send(vio
, &pkt
, sizeof(pkt
));
85 static int handle_attr_info(struct vio_driver_state
*vio
,
86 struct vio_net_attr_info
*pkt
)
88 viodbg(HS
, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
89 "ackfreq[%u] mtu[%llu]\n",
90 pkt
->xfer_mode
, pkt
->addr_type
,
91 (unsigned long long) pkt
->addr
,
93 (unsigned long long) pkt
->mtu
);
95 pkt
->tag
.sid
= vio_send_sid(vio
);
97 if (pkt
->xfer_mode
!= VIO_DRING_MODE
||
98 pkt
->addr_type
!= VNET_ADDR_ETHERMAC
||
99 pkt
->mtu
!= ETH_FRAME_LEN
) {
100 viodbg(HS
, "SEND NET ATTR NACK\n");
102 pkt
->tag
.stype
= VIO_SUBTYPE_NACK
;
104 (void) vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
108 viodbg(HS
, "SEND NET ATTR ACK\n");
110 pkt
->tag
.stype
= VIO_SUBTYPE_ACK
;
112 return vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
117 static int handle_attr_ack(struct vio_driver_state
*vio
,
118 struct vio_net_attr_info
*pkt
)
120 viodbg(HS
, "GOT NET ATTR ACK\n");
125 static int handle_attr_nack(struct vio_driver_state
*vio
,
126 struct vio_net_attr_info
*pkt
)
128 viodbg(HS
, "GOT NET ATTR NACK\n");
133 static int vnet_handle_attr(struct vio_driver_state
*vio
, void *arg
)
135 struct vio_net_attr_info
*pkt
= arg
;
137 switch (pkt
->tag
.stype
) {
138 case VIO_SUBTYPE_INFO
:
139 return handle_attr_info(vio
, pkt
);
141 case VIO_SUBTYPE_ACK
:
142 return handle_attr_ack(vio
, pkt
);
144 case VIO_SUBTYPE_NACK
:
145 return handle_attr_nack(vio
, pkt
);
152 static void vnet_handshake_complete(struct vio_driver_state
*vio
)
154 struct vio_dring_state
*dr
;
156 dr
= &vio
->drings
[VIO_DRIVER_RX_RING
];
157 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
159 dr
= &vio
->drings
[VIO_DRIVER_TX_RING
];
160 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
178 static struct sk_buff
*alloc_and_align_skb(struct net_device
*dev
,
181 struct sk_buff
*skb
= netdev_alloc_skb(dev
, len
+VNET_PACKET_SKIP
+8+8);
182 unsigned long addr
, off
;
187 addr
= (unsigned long) skb
->data
;
188 off
= ((addr
+ 7UL) & ~7UL) - addr
;
190 skb_reserve(skb
, off
);
195 static int vnet_rx_one(struct vnet_port
*port
, unsigned int len
,
196 struct ldc_trans_cookie
*cookies
, int ncookies
)
198 struct net_device
*dev
= port
->vp
->dev
;
199 unsigned int copy_len
;
204 if (unlikely(len
< ETH_ZLEN
|| len
> ETH_FRAME_LEN
)) {
205 dev
->stats
.rx_length_errors
++;
209 skb
= alloc_and_align_skb(dev
, len
);
211 if (unlikely(!skb
)) {
212 dev
->stats
.rx_missed_errors
++;
216 copy_len
= (len
+ VNET_PACKET_SKIP
+ 7U) & ~7U;
217 skb_put(skb
, copy_len
);
218 err
= ldc_copy(port
->vio
.lp
, LDC_COPY_IN
,
219 skb
->data
, copy_len
, 0,
221 if (unlikely(err
< 0)) {
222 dev
->stats
.rx_frame_errors
++;
226 skb_pull(skb
, VNET_PACKET_SKIP
);
228 skb
->protocol
= eth_type_trans(skb
, dev
);
230 dev
->stats
.rx_packets
++;
231 dev
->stats
.rx_bytes
+= len
;
241 dev
->stats
.rx_dropped
++;
245 static int vnet_send_ack(struct vnet_port
*port
, struct vio_dring_state
*dr
,
246 u32 start
, u32 end
, u8 vio_dring_state
)
248 struct vio_dring_data hdr
= {
250 .type
= VIO_TYPE_DATA
,
251 .stype
= VIO_SUBTYPE_ACK
,
252 .stype_env
= VIO_DRING_DATA
,
253 .sid
= vio_send_sid(&port
->vio
),
255 .dring_ident
= dr
->ident
,
258 .state
= vio_dring_state
,
262 hdr
.seq
= dr
->snd_nxt
;
265 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
271 if ((delay
<<= 1) > 128)
273 } while (err
== -EAGAIN
);
278 static u32
next_idx(u32 idx
, struct vio_dring_state
*dr
)
280 if (++idx
== dr
->num_entries
)
285 static u32
prev_idx(u32 idx
, struct vio_dring_state
*dr
)
288 idx
= dr
->num_entries
- 1;
295 static struct vio_net_desc
*get_rx_desc(struct vnet_port
*port
,
296 struct vio_dring_state
*dr
,
299 struct vio_net_desc
*desc
= port
->vio
.desc_buf
;
302 err
= ldc_get_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
303 (index
* dr
->entry_size
),
304 dr
->cookies
, dr
->ncookies
);
311 static int put_rx_desc(struct vnet_port
*port
,
312 struct vio_dring_state
*dr
,
313 struct vio_net_desc
*desc
,
318 err
= ldc_put_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
319 (index
* dr
->entry_size
),
320 dr
->cookies
, dr
->ncookies
);
327 static int vnet_walk_rx_one(struct vnet_port
*port
,
328 struct vio_dring_state
*dr
,
329 u32 index
, int *needs_ack
)
331 struct vio_net_desc
*desc
= get_rx_desc(port
, dr
, index
);
332 struct vio_driver_state
*vio
= &port
->vio
;
336 return PTR_ERR(desc
);
338 viodbg(DATA
, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n",
339 desc
->hdr
.state
, desc
->hdr
.ack
,
340 desc
->size
, desc
->ncookies
,
341 desc
->cookies
[0].cookie_addr
,
342 desc
->cookies
[0].cookie_size
);
344 if (desc
->hdr
.state
!= VIO_DESC_READY
)
346 err
= vnet_rx_one(port
, desc
->size
, desc
->cookies
, desc
->ncookies
);
347 if (err
== -ECONNRESET
)
349 desc
->hdr
.state
= VIO_DESC_DONE
;
350 err
= put_rx_desc(port
, dr
, desc
, index
);
353 *needs_ack
= desc
->hdr
.ack
;
357 static int vnet_walk_rx(struct vnet_port
*port
, struct vio_dring_state
*dr
,
360 struct vio_driver_state
*vio
= &port
->vio
;
361 int ack_start
= -1, ack_end
= -1;
363 end
= (end
== (u32
) -1) ? prev_idx(start
, dr
) : next_idx(end
, dr
);
365 viodbg(DATA
, "vnet_walk_rx start[%08x] end[%08x]\n", start
, end
);
367 while (start
!= end
) {
368 int ack
= 0, err
= vnet_walk_rx_one(port
, dr
, start
, &ack
);
369 if (err
== -ECONNRESET
)
376 start
= next_idx(start
, dr
);
377 if (ack
&& start
!= end
) {
378 err
= vnet_send_ack(port
, dr
, ack_start
, ack_end
,
380 if (err
== -ECONNRESET
)
385 if (unlikely(ack_start
== -1))
386 ack_start
= ack_end
= prev_idx(start
, dr
);
387 return vnet_send_ack(port
, dr
, ack_start
, ack_end
, VIO_DRING_STOPPED
);
390 static int vnet_rx(struct vnet_port
*port
, void *msgbuf
)
392 struct vio_dring_data
*pkt
= msgbuf
;
393 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_RX_RING
];
394 struct vio_driver_state
*vio
= &port
->vio
;
396 viodbg(DATA
, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n",
397 pkt
->tag
.stype_env
, pkt
->seq
, dr
->rcv_nxt
);
399 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
401 if (unlikely(pkt
->seq
!= dr
->rcv_nxt
)) {
402 printk(KERN_ERR PFX
"RX out of sequence seq[0x%lx] "
403 "rcv_nxt[0x%lx]\n", pkt
->seq
, dr
->rcv_nxt
);
409 /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
411 return vnet_walk_rx(port
, dr
, pkt
->start_idx
, pkt
->end_idx
);
414 static int idx_is_pending(struct vio_dring_state
*dr
, u32 end
)
419 while (idx
!= dr
->prod
) {
424 idx
= next_idx(idx
, dr
);
429 static int vnet_ack(struct vnet_port
*port
, void *msgbuf
)
431 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
432 struct vio_dring_data
*pkt
= msgbuf
;
433 struct net_device
*dev
;
437 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
441 if (unlikely(!idx_is_pending(dr
, end
)))
444 dr
->cons
= next_idx(end
, dr
);
448 if (unlikely(netif_queue_stopped(dev
) &&
449 vnet_tx_dring_avail(dr
) >= VNET_TX_WAKEUP_THRESH(dr
)))
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}
461 static void maybe_tx_wakeup(struct vnet
*vp
)
463 struct net_device
*dev
= vp
->dev
;
466 if (likely(netif_queue_stopped(dev
))) {
467 struct vnet_port
*port
;
470 list_for_each_entry(port
, &vp
->port_list
, list
) {
471 struct vio_dring_state
*dr
;
473 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
474 if (vnet_tx_dring_avail(dr
) <
475 VNET_TX_WAKEUP_THRESH(dr
)) {
481 netif_wake_queue(dev
);
483 netif_tx_unlock(dev
);
486 static void vnet_event(void *arg
, int event
)
488 struct vnet_port
*port
= arg
;
489 struct vio_driver_state
*vio
= &port
->vio
;
493 spin_lock_irqsave(&vio
->lock
, flags
);
495 if (unlikely(event
== LDC_EVENT_RESET
||
496 event
== LDC_EVENT_UP
)) {
497 vio_link_state_change(vio
, event
);
498 spin_unlock_irqrestore(&vio
->lock
, flags
);
503 if (unlikely(event
!= LDC_EVENT_DATA_READY
)) {
504 printk(KERN_WARNING PFX
"Unexpected LDC event %d\n", event
);
505 spin_unlock_irqrestore(&vio
->lock
, flags
);
512 struct vio_msg_tag tag
;
516 err
= ldc_read(vio
->lp
, &msgbuf
, sizeof(msgbuf
));
517 if (unlikely(err
< 0)) {
518 if (err
== -ECONNRESET
)
524 viodbg(DATA
, "TAG [%02x:%02x:%04x:%08x]\n",
527 msgbuf
.tag
.stype_env
,
529 err
= vio_validate_sid(vio
, &msgbuf
.tag
);
533 if (likely(msgbuf
.tag
.type
== VIO_TYPE_DATA
)) {
534 if (msgbuf
.tag
.stype
== VIO_SUBTYPE_INFO
) {
535 err
= vnet_rx(port
, &msgbuf
);
536 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_ACK
) {
537 err
= vnet_ack(port
, &msgbuf
);
540 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_NACK
) {
541 err
= vnet_nack(port
, &msgbuf
);
543 } else if (msgbuf
.tag
.type
== VIO_TYPE_CTRL
) {
544 err
= vio_control_pkt_engine(vio
, &msgbuf
);
548 err
= vnet_handle_unknown(port
, &msgbuf
);
550 if (err
== -ECONNRESET
)
553 spin_unlock(&vio
->lock
);
554 if (unlikely(tx_wakeup
&& err
!= -ECONNRESET
))
555 maybe_tx_wakeup(port
->vp
);
556 local_irq_restore(flags
);
559 static int __vnet_tx_trigger(struct vnet_port
*port
)
561 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
562 struct vio_dring_data hdr
= {
564 .type
= VIO_TYPE_DATA
,
565 .stype
= VIO_SUBTYPE_INFO
,
566 .stype_env
= VIO_DRING_DATA
,
567 .sid
= vio_send_sid(&port
->vio
),
569 .dring_ident
= dr
->ident
,
570 .start_idx
= dr
->prod
,
575 hdr
.seq
= dr
->snd_nxt
;
578 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
584 if ((delay
<<= 1) > 128)
586 } while (err
== -EAGAIN
);
591 struct vnet_port
*__tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
593 unsigned int hash
= vnet_hashfn(skb
->data
);
594 struct hlist_head
*hp
= &vp
->port_hash
[hash
];
595 struct hlist_node
*n
;
596 struct vnet_port
*port
;
598 hlist_for_each_entry(port
, n
, hp
, hash
) {
599 if (!compare_ether_addr(port
->raddr
, skb
->data
))
603 if (!list_empty(&vp
->port_list
))
604 port
= list_entry(vp
->port_list
.next
, struct vnet_port
, list
);
609 struct vnet_port
*tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
611 struct vnet_port
*ret
;
614 spin_lock_irqsave(&vp
->lock
, flags
);
615 ret
= __tx_port_find(vp
, skb
);
616 spin_unlock_irqrestore(&vp
->lock
, flags
);
621 static int vnet_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
623 struct vnet
*vp
= netdev_priv(dev
);
624 struct vnet_port
*port
= tx_port_find(vp
, skb
);
625 struct vio_dring_state
*dr
;
626 struct vio_net_desc
*d
;
635 spin_lock_irqsave(&port
->vio
.lock
, flags
);
637 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
638 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
639 if (!netif_queue_stopped(dev
)) {
640 netif_stop_queue(dev
);
642 /* This is a hard error, log it. */
643 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
644 "queue awake!\n", dev
->name
);
645 dev
->stats
.tx_errors
++;
647 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
648 return NETDEV_TX_BUSY
;
651 d
= vio_dring_cur(dr
);
653 tx_buf
= port
->tx_bufs
[dr
->prod
].buf
;
654 skb_copy_from_linear_data(skb
, tx_buf
+ VNET_PACKET_SKIP
, skb
->len
);
657 if (len
< ETH_ZLEN
) {
659 memset(tx_buf
+VNET_PACKET_SKIP
+skb
->len
, 0, len
- skb
->len
);
662 d
->hdr
.ack
= VIO_ACK_ENABLE
;
664 d
->ncookies
= port
->tx_bufs
[dr
->prod
].ncookies
;
665 for (i
= 0; i
< d
->ncookies
; i
++)
666 d
->cookies
[i
] = port
->tx_bufs
[dr
->prod
].cookies
[i
];
668 /* This has to be a non-SMP write barrier because we are writing
669 * to memory which is shared with the peer LDOM.
673 d
->hdr
.state
= VIO_DESC_READY
;
675 err
= __vnet_tx_trigger(port
);
676 if (unlikely(err
< 0)) {
677 printk(KERN_INFO PFX
"%s: TX trigger error %d\n",
679 d
->hdr
.state
= VIO_DESC_FREE
;
680 dev
->stats
.tx_carrier_errors
++;
681 goto out_dropped_unlock
;
684 dev
->stats
.tx_packets
++;
685 dev
->stats
.tx_bytes
+= skb
->len
;
687 dr
->prod
= (dr
->prod
+ 1) & (VNET_TX_RING_SIZE
- 1);
688 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
689 netif_stop_queue(dev
);
690 if (vnet_tx_dring_avail(dr
) > VNET_TX_WAKEUP_THRESH(dr
))
691 netif_wake_queue(dev
);
694 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
698 dev
->trans_start
= jiffies
;
702 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
706 dev
->stats
.tx_dropped
++;
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}
static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}
static void vnet_set_rx_mode(struct net_device *dev)
{
	/* XXX Implement multicast support XXX */
}
736 static int vnet_change_mtu(struct net_device
*dev
, int new_mtu
)
738 if (new_mtu
!= ETH_DATA_LEN
)
745 static int vnet_set_mac_addr(struct net_device
*dev
, void *p
)
750 static void vnet_get_drvinfo(struct net_device
*dev
,
751 struct ethtool_drvinfo
*info
)
753 strcpy(info
->driver
, DRV_MODULE_NAME
);
754 strcpy(info
->version
, DRV_MODULE_VERSION
);
757 static u32
vnet_get_msglevel(struct net_device
*dev
)
759 struct vnet
*vp
= netdev_priv(dev
);
760 return vp
->msg_enable
;
763 static void vnet_set_msglevel(struct net_device
*dev
, u32 value
)
765 struct vnet
*vp
= netdev_priv(dev
);
766 vp
->msg_enable
= value
;
769 static const struct ethtool_ops vnet_ethtool_ops
= {
770 .get_drvinfo
= vnet_get_drvinfo
,
771 .get_msglevel
= vnet_get_msglevel
,
772 .set_msglevel
= vnet_set_msglevel
,
773 .get_link
= ethtool_op_get_link
,
774 .get_perm_addr
= ethtool_op_get_perm_addr
,
777 static void vnet_port_free_tx_bufs(struct vnet_port
*port
)
779 struct vio_dring_state
*dr
;
782 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
784 ldc_free_exp_dring(port
->vio
.lp
, dr
->base
,
785 (dr
->entry_size
* dr
->num_entries
),
786 dr
->cookies
, dr
->ncookies
);
794 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
795 void *buf
= port
->tx_bufs
[i
].buf
;
800 ldc_unmap(port
->vio
.lp
,
801 port
->tx_bufs
[i
].cookies
,
802 port
->tx_bufs
[i
].ncookies
);
805 port
->tx_bufs
[i
].buf
= NULL
;
809 static int __devinit
vnet_port_alloc_tx_bufs(struct vnet_port
*port
)
811 struct vio_dring_state
*dr
;
813 int i
, err
, ncookies
;
816 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
817 void *buf
= kzalloc(ETH_FRAME_LEN
+ 8, GFP_KERNEL
);
818 int map_len
= (ETH_FRAME_LEN
+ 7) & ~7;
822 printk(KERN_ERR
"TX buffer allocation failure\n");
826 if ((unsigned long)buf
& (8UL - 1)) {
827 printk(KERN_ERR
"TX buffer misaligned\n");
832 err
= ldc_map_single(port
->vio
.lp
, buf
, map_len
,
833 port
->tx_bufs
[i
].cookies
, 2,
841 port
->tx_bufs
[i
].buf
= buf
;
842 port
->tx_bufs
[i
].ncookies
= err
;
845 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
847 len
= (VNET_TX_RING_SIZE
*
848 (sizeof(struct vio_net_desc
) +
849 (sizeof(struct ldc_trans_cookie
) * 2)));
851 ncookies
= VIO_MAX_RING_COOKIES
;
852 dring
= ldc_alloc_exp_dring(port
->vio
.lp
, len
,
853 dr
->cookies
, &ncookies
,
858 err
= PTR_ERR(dring
);
863 dr
->entry_size
= (sizeof(struct vio_net_desc
) +
864 (sizeof(struct ldc_trans_cookie
) * 2));
865 dr
->num_entries
= VNET_TX_RING_SIZE
;
866 dr
->prod
= dr
->cons
= 0;
867 dr
->pending
= VNET_TX_RING_SIZE
;
868 dr
->ncookies
= ncookies
;
873 vnet_port_free_tx_bufs(port
);
878 static struct ldc_channel_config vnet_ldc_cfg
= {
881 .mode
= LDC_MODE_UNRELIABLE
,
884 static struct vio_driver_ops vnet_vio_ops
= {
885 .send_attr
= vnet_send_attr
,
886 .handle_attr
= vnet_handle_attr
,
887 .handshake_complete
= vnet_handshake_complete
,
const char *remote_macaddr_prop = "remote-mac-address";
892 static int __devinit
vnet_port_probe(struct vio_dev
*vdev
,
893 const struct vio_device_id
*id
)
895 struct mdesc_handle
*hp
;
896 struct vnet_port
*port
;
900 int len
, i
, err
, switch_port
;
902 vp
= dev_get_drvdata(vdev
->dev
.parent
);
904 printk(KERN_ERR PFX
"Cannot find port parent vnet.\n");
910 rmac
= mdesc_get_property(hp
, vdev
->mp
, remote_macaddr_prop
, &len
);
913 printk(KERN_ERR PFX
"Port lacks %s property.\n",
914 remote_macaddr_prop
);
915 goto err_out_put_mdesc
;
918 port
= kzalloc(sizeof(*port
), GFP_KERNEL
);
921 printk(KERN_ERR PFX
"Cannot allocate vnet_port.\n");
922 goto err_out_put_mdesc
;
925 for (i
= 0; i
< ETH_ALEN
; i
++)
926 port
->raddr
[i
] = (*rmac
>> (5 - i
) * 8) & 0xff;
930 err
= vio_driver_init(&port
->vio
, vdev
, VDEV_NETWORK
,
931 vnet_versions
, ARRAY_SIZE(vnet_versions
),
932 &vnet_vio_ops
, vp
->dev
->name
);
934 goto err_out_free_port
;
936 err
= vio_ldc_alloc(&port
->vio
, &vnet_ldc_cfg
, port
);
938 goto err_out_free_port
;
940 err
= vnet_port_alloc_tx_bufs(port
);
942 goto err_out_free_ldc
;
944 INIT_HLIST_NODE(&port
->hash
);
945 INIT_LIST_HEAD(&port
->list
);
948 if (mdesc_get_property(hp
, vdev
->mp
, "switch-port", NULL
) != NULL
)
951 spin_lock_irqsave(&vp
->lock
, flags
);
953 list_add(&port
->list
, &vp
->port_list
);
955 list_add_tail(&port
->list
, &vp
->port_list
);
956 hlist_add_head(&port
->hash
, &vp
->port_hash
[vnet_hashfn(port
->raddr
)]);
957 spin_unlock_irqrestore(&vp
->lock
, flags
);
959 dev_set_drvdata(&vdev
->dev
, port
);
961 printk(KERN_INFO
"%s: PORT ( remote-mac ", vp
->dev
->name
);
962 for (i
= 0; i
< 6; i
++)
963 printk("%2.2x%c", port
->raddr
[i
], i
== 5 ? ' ' : ':');
965 printk("switch-port ");
968 vio_port_up(&port
->vio
);
975 vio_ldc_free(&port
->vio
);
985 static int vnet_port_remove(struct vio_dev
*vdev
)
987 struct vnet_port
*port
= dev_get_drvdata(&vdev
->dev
);
990 struct vnet
*vp
= port
->vp
;
993 del_timer_sync(&port
->vio
.timer
);
995 spin_lock_irqsave(&vp
->lock
, flags
);
996 list_del(&port
->list
);
997 hlist_del(&port
->hash
);
998 spin_unlock_irqrestore(&vp
->lock
, flags
);
1000 vnet_port_free_tx_bufs(port
);
1001 vio_ldc_free(&port
->vio
);
1003 dev_set_drvdata(&vdev
->dev
, NULL
);
1010 static struct vio_device_id vnet_port_match
[] = {
1012 .type
= "vnet-port",
1016 MODULE_DEVICE_TABLE(vio
, vnet_match
);
1018 static struct vio_driver vnet_port_driver
= {
1019 .id_table
= vnet_port_match
,
1020 .probe
= vnet_port_probe
,
1021 .remove
= vnet_port_remove
,
1023 .name
= "vnet_port",
1024 .owner
= THIS_MODULE
,
const char *local_mac_prop = "local-mac-address";
1030 static int __devinit
vnet_probe(struct vio_dev
*vdev
,
1031 const struct vio_device_id
*id
)
1033 static int vnet_version_printed
;
1034 struct mdesc_handle
*hp
;
1035 struct net_device
*dev
;
1040 if (vnet_version_printed
++ == 0)
1041 printk(KERN_INFO
"%s", version
);
1045 mac
= mdesc_get_property(hp
, vdev
->mp
, local_mac_prop
, &len
);
1047 printk(KERN_ERR PFX
"vnet lacks %s property.\n",
1053 dev
= alloc_etherdev(sizeof(*vp
));
1055 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
1060 for (i
= 0; i
< ETH_ALEN
; i
++)
1061 dev
->dev_addr
[i
] = (*mac
>> (5 - i
) * 8) & 0xff;
1063 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
1065 SET_NETDEV_DEV(dev
, &vdev
->dev
);
1067 vp
= netdev_priv(dev
);
1069 spin_lock_init(&vp
->lock
);
1073 INIT_LIST_HEAD(&vp
->port_list
);
1074 for (i
= 0; i
< VNET_PORT_HASH_SIZE
; i
++)
1075 INIT_HLIST_HEAD(&vp
->port_hash
[i
]);
1077 dev
->open
= vnet_open
;
1078 dev
->stop
= vnet_close
;
1079 dev
->set_multicast_list
= vnet_set_rx_mode
;
1080 dev
->set_mac_address
= vnet_set_mac_addr
;
1081 dev
->tx_timeout
= vnet_tx_timeout
;
1082 dev
->ethtool_ops
= &vnet_ethtool_ops
;
1083 dev
->watchdog_timeo
= VNET_TX_TIMEOUT
;
1084 dev
->change_mtu
= vnet_change_mtu
;
1085 dev
->hard_start_xmit
= vnet_start_xmit
;
1087 err
= register_netdev(dev
);
1089 printk(KERN_ERR PFX
"Cannot register net device, "
1091 goto err_out_free_dev
;
1094 printk(KERN_INFO
"%s: Sun LDOM vnet ", dev
->name
);
1096 for (i
= 0; i
< 6; i
++)
1097 printk("%2.2x%c", dev
->dev_addr
[i
], i
== 5 ? '\n' : ':');
1099 dev_set_drvdata(&vdev
->dev
, vp
);
1113 static int vnet_remove(struct vio_dev
*vdev
)
1116 struct vnet
*vp
= dev_get_drvdata(&vdev
->dev
);
1119 /* XXX unregister port, or at least check XXX */
1120 unregister_netdevice(vp
->dev
);
1121 dev_set_drvdata(&vdev
->dev
, NULL
);
1126 static struct vio_device_id vnet_match
[] = {
1132 MODULE_DEVICE_TABLE(vio
, vnet_match
);
1134 static struct vio_driver vnet_driver
= {
1135 .id_table
= vnet_match
,
1136 .probe
= vnet_probe
,
1137 .remove
= vnet_remove
,
1140 .owner
= THIS_MODULE
,
1144 static int __init
vnet_init(void)
1146 int err
= vio_register_driver(&vnet_driver
);
1149 err
= vio_register_driver(&vnet_port_driver
);
1151 vio_unregister_driver(&vnet_driver
);
1157 static void __exit
vnet_exit(void)
1159 vio_unregister_driver(&vnet_port_driver
);
1160 vio_unregister_driver(&vnet_driver
);
1163 module_init(vnet_init
);
1164 module_exit(vnet_exit
);