/*
 * IBM eServer i/pSeries Virtual Ethernet Device Driver
 * Copyright (C) 2003 IBM Corp.
 *   Originally written by Dave Larson (larson1@us.ibm.com)
 *   Maintained by Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 */

/* TODO:
 * - remove frag processing code - no longer needed
 * - add support for sysfs
 * - possibly remove procfs support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG

#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if(!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif

static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct net_device *dev, int *budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
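
/*
 * The receive queue is a ring shared with the hypervisor.  Each entry
 * carries a toggle bit: the hypervisor flips the sense of the bit on
 * every pass around the ring, so an entry is new exactly when its
 * toggle matches adapter->rx_queue.toggle, which the driver inverts
 * each time its index wraps (see ibmveth_rxq_recycle_buffer() and
 * ibmveth_rxq_harvest_buffer() below).
 */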

/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index,
				     u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);

	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
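
/*
 * free_map is a ring of free buffer-pool slots: ibmveth_replenish_buffer_pool()
 * takes slots at consumer_index and marks them IBM_VETH_INVALID_MAP while the
 * buffer is posted to the hypervisor; ibmveth_remove_buffer_from_pool() returns
 * the slot at producer_index once the buffer comes back off the receive queue.
 */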

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index++ % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_SUCCESS) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}
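
/*
 * Worked example of the correlator encoding used above: a buffer in
 * pool 1 at slot 3 is tagged ((u64)1 << 32) | 3 == 0x0000000100000003.
 * The same value is written into the first 8 bytes of the buffer and
 * echoed back by the hypervisor in the rx queue entry, which is how
 * the receive path finds the pool and skb for a completed buffer.
 */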

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}
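
/*
 * The last 8 bytes of the 4K buffer list page hold the firmware-maintained
 * count of packets dropped for lack of a receive buffer; the read above
 * snapshots it into adapter->rx_no_buffer for the procfs statistics.
 */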

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
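
/*
 * Two ways a buffer leaves the rx queue: recycling hands the same
 * DMA-mapped buffer straight back to the hypervisor (used when the
 * entry was marked invalid), while harvesting unmaps the buffer and
 * returns its slot to the pool because the skb is being passed up
 * the stack.
 */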

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
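
/*
 * Open sequence: allocate the 4K buffer-list and filter-list pages and
 * the receive queue, DMA-map all three, register them with the
 * hypervisor, allocate the active buffer pools, grab the interrupt,
 * and finally fake one interrupt to prime the initial replenish cycle.
 */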
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	for(i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		return -ENONET;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev, NULL);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
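
/*
 * The ethtool settings below are fixed, synthetic values: a virtual
 * adapter has no real PHY, so it always claims 1000FD on an internal
 * "fibre" port with the link up.
 */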
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
	.get_settings	= netdev_get_settings,
	.get_link	= netdev_get_link,
	.get_sg		= ethtool_op_get_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
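
/*
 * Transmit uses up to IbmVethMaxSendFrags descriptors per frame; each
 * union ibmveth_buf_desc packs valid/length/address into a single u64
 * that is passed by value to h_send_logical_lan, so the driver keeps
 * no tx descriptor ring of its own.
 */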
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;

	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		tx_dropped++;
		goto out;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if(nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length  = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid   = 1;

	if(dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while(curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
				page_address(frag->page) + frag->page_offset,
				frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid  = 1;

		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			tx_map_failed++;
			tx_dropped++;
			/* Free all the mappings we just created */
			while(curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			goto out;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc,
					     desc[1].desc,
					     desc[2].desc,
					     desc[3].desc,
					     desc[4].desc,
					     desc[5].desc,
					     correlator,
					     &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		int i;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for(i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				desc[nfrags].fields.address,
				desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while(--nfrags >= 0);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_bytes += tx_bytes;
	adapter->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}
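
/*
 * NAPI poll: consume up to the quota of rx queue entries, replenish the
 * pools, then re-enable the interrupt and check the queue once more to
 * close the race between "queue looked empty" and "interrupt re-armed".
 */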
static int ibmveth_poll(struct net_device *netdev, int *budget)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	int max_frames_to_process = netdev->quota;
	int frames_processed = 0;
	int more_work = 1;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct net_device *netdev = adapter->netdev;

		if(ibmveth_rxq_pending_buffer(adapter)) {
			struct sk_buff *skb;

			rmb();

			if(!ibmveth_rxq_buffer_valid(adapter)) {
				wmb(); /* suggested by larson1 */
				adapter->rx_invalid_buffer++;
				ibmveth_debug_printk("recycling invalid buffer\n");
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				int length = ibmveth_rxq_frame_length(adapter);
				int offset = ibmveth_rxq_frame_offset(adapter);
				skb = ibmveth_rxq_get_buffer(adapter);

				ibmveth_rxq_harvest_buffer(adapter);

				skb_reserve(skb, offset);
				skb_put(skb, length);
				skb->dev = netdev;
				skb->protocol = eth_type_trans(skb, netdev);

				netif_receive_skb(skb);	/* send it up */

				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += length;
				frames_processed++;
				netdev->last_rx = jiffies;
			}
		} else {
			more_work = 0;
		}
	} while(more_work && (frames_processed < max_frames_to_process));

	ibmveth_replenish_task(adapter);

	if(more_work) {
		/* more work to do - return that we are not done yet */
		netdev->quota -= frames_processed;
		*budget -= frames_processed;
		return 1;
	}

	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

	ibmveth_assert(lpar_rc == H_SUCCESS);

	netif_rx_complete(netdev);

	if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
	{
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		more_work = 1;
		goto restart_poll;
	}

	netdev->quota -= frames_processed;
	*budget -= frames_processed;

	/* we really are done */
	return 0;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if(netif_rx_schedule_prep(netdev)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev);
	}
	return IRQ_HANDLED;
}

static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			// add the multicast address to the filter table
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
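
/*
 * An MTU change is only allowed if some active buffer pool has buffers
 * large enough for the new MTU plus IBMVETH_BUFF_OH bytes of overhead;
 * pool sizes themselves are adjusted through sysfs, not here.
 */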
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	/* Look for an active buffer pool that can hold the new MTU */
	for(i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(dev->priv);
	ibmveth_interrupt(dev->irq, dev, NULL);
}
#endif

static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter = NULL;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	memset(adapter, 0, sizeof(*adapter));
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	/* Some older boxes running PHYP non-natively have an OF that
	   returns a 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	adapter->liobn = dev->iommu_table->it_index;

	netdev->irq = dev->irq;
	netdev->open               = ibmveth_open;
	netdev->poll               = ibmveth_poll;
	netdev->weight             = 16;
	netdev->stop               = ibmveth_close;
	netdev->hard_start_xmit    = ibmveth_start_xmit;
	netdev->get_stats          = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl           = ibmveth_ioctl;
	netdev->ethtool_ops        = &netdev_ethtool_ops;
	netdev->change_mtu         = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ibmveth_poll_controller;
#endif
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		kobj->parent = &dev->dev.kobj;
		sprintf(kobj->name, "pool%d", i);
		kobj->ktype = &ktype_veth_pool;
		kobject_register(kobj);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	int i;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		kobject_unregister(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}

static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  skbuffs linearized:     %ld\n", adapter->tx_linearized);
	seq_printf(seq, "       multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "       skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "       vio_map_single failures: %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:          %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:  %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:     %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:    %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:        %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:             %ld\n", adapter->rx_no_buffer);

	return 0;
}

static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}

static struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
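
/*
 * Each buffer pool is exposed as a sysfs kobject ("pool0".."poolN")
 * under the vio device, with three writable attributes: active
 * (enable/disable the pool), num (number of buffers) and size (buffer
 * size in bytes).  Changing num or size closes and reopens the device
 * so the rx queue and pools can be rebuilt.
 */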
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject * kobj,
			      struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
			       const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev =
	    container_of(kobj->parent, struct device, kobj)->driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if(ibmveth_alloc_buffer_pool(pool)) {
				ibmveth_error_printk("unable to alloc pool\n");
				return -ENOMEM;
			}
			pool->active = 1;
			adapter->pool_config = 1;
			ibmveth_close(netdev);
			adapter->pool_config = 0;
			if ((rc = ibmveth_open(netdev)))
				return rc;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for(i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu < adapter->rx_buff_pool[i].buff_size) {
					pool->active = 0;
					h_free_logical_lan_buffer(adapter->
								  vdev->
								  unit_address,
								  pool->
								  buff_size);
				}
			}
			if (pool->active) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			adapter->pool_config = 1;
			ibmveth_close(netdev);
			adapter->pool_config = 0;
			pool->size = value;
			if ((rc = ibmveth_open(netdev)))
				return rc;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			adapter->pool_config = 1;
			ibmveth_close(netdev);
			adapter->pool_config = 0;
			pool->buff_size = value;
			if ((rc = ibmveth_open(netdev)))
				return rc;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev, NULL);
	return count;
}
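
/*
 * Example (the path is illustrative; the unit-address component depends
 * on the machine):
 *
 *   echo 768 > /sys/devices/vio/30000002/pool0/num
 *
 * takes the interface down, resizes pool0 to 768 buffers, and brings it
 * back up via ibmveth_open().
 */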
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static struct sysfs_ops veth_pool_ops = {
	.show  = veth_pool_show,
	.store = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release       = NULL,
	.sysfs_ops     = &veth_pool_ops,
	.default_attrs = veth_pool_attrs,
};

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
	.id_table = ibmveth_device_table,
	.probe    = ibmveth_probe,
	.remove   = ibmveth_remove,
	.driver   = {
		.name  = ibmveth_driver_name,
		.owner = THIS_MODULE,
	}
};

static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);