/**************************************************************************
 *
 * IBM eServer i/pSeries Virtual Ethernet Device Driver
 * Copyright (C) 2003 IBM Corp.
 * Originally written by Dave Larson (larson1@us.ibm.com)
 * Maintained by Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 **************************************************************************/
/* TODO:
 * - remove frag processing code - no longer needed
 * - add support for sysfs
 * - possibly remove procfs support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if (!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].csum_good);
}
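
/*
 * The receive queue is a ring shared with the hypervisor: each entry carries
 * a valid flag and a toggle bit.  An entry belongs to the driver while its
 * toggle matches adapter->rx_queue.toggle; the driver flips its own copy of
 * the toggle every time the index wraps (see ibmveth_rxq_harvest_buffer), so
 * the accessors above always describe the oldest unprocessed entry.
 */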
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index,
				     u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
	pool->active = pool_active;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void *) * pool->size, GFP_KERNEL);
	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void *) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
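
/*
 * free_map is a ring of buffer indices: ibmveth_replenish_buffer_pool pulls
 * indices at consumer_index when posting buffers to firmware, and
 * ibmveth_remove_buffer_from_pool returns them at producer_index once a
 * buffer has been harvested from the receive queue.
 */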
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	for (i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		dma_addr_t dma_addr;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if (!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;
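
		/* the correlator travels with the buffer through firmware and
		 * comes back on the receive queue: pool index in the upper
		 * 32 bits, buffer index in the lower 32, with a copy stashed
		 * in the first 8 bytes of the buffer data */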
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if (lpar_rc != H_SUCCESS) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			if (pool->consumer_index == 0)
				pool->consumer_index = pool->size - 1;
			else
				pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					 pool->dma_addr[index], pool->buff_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	kfree(pool->dma_addr);
	pool->dma_addr = NULL;
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index
		= (adapter->rx_buff_pool[pool].producer_index + 1)
		% adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->rx_queue.queue_dma,
					 adapter->rx_queue.queue_len,
					 DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(adapter->buffer_list_dma)) ||
	    (dma_mapping_error(adapter->filter_list_dma)) ||
	    (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;
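
	/* dev_addr holds the 6-byte MAC; on this big-endian platform the
	 * memcpy below leaves it in the top bytes of the u64, so shift right
	 * by 16 to right-justify it for the register-logical-lan hcall */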
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     adapter->rx_queue.queue_dma, mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if ((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS)
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev)
{
	return 1;
}
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data) {
		adapter->rx_csum = 1;
	} else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}
}

static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;
}
*dev
, u32 data
,
686 void (*done
) (struct net_device
*, u32
))
688 struct ibmveth_adapter
*adapter
= dev
->priv
;
689 union ibmveth_illan_attributes set_attr
, clr_attr
, ret_attr
;
691 int rc1
= 0, rc2
= 0;
694 if (netif_running(dev
)) {
696 adapter
->pool_config
= 1;
698 adapter
->pool_config
= 0;
705 set_attr
.fields
.tcp_csum_offload_ipv4
= 1;
707 clr_attr
.fields
.tcp_csum_offload_ipv4
= 1;
709 ret
= h_illan_attributes(adapter
->vdev
->unit_address
, 0, 0, &ret_attr
.desc
);
711 if (ret
== H_SUCCESS
&& !ret_attr
.fields
.active_trunk
&&
712 !ret_attr
.fields
.trunk_priority
&&
713 ret_attr
.fields
.csum_offload_padded_pkt_support
) {
714 ret
= h_illan_attributes(adapter
->vdev
->unit_address
, clr_attr
.desc
,
715 set_attr
.desc
, &ret_attr
.desc
);
717 if (ret
!= H_SUCCESS
) {
719 ibmveth_error_printk("unable to change checksum offload settings."
720 " %d rc=%ld\n", data
, ret
);
722 ret
= h_illan_attributes(adapter
->vdev
->unit_address
,
723 set_attr
.desc
, clr_attr
.desc
, &ret_attr
.desc
);
728 ibmveth_error_printk("unable to change checksum offload settings."
729 " %d rc=%ld ret_attr=%lx\n", data
, ret
, ret_attr
.desc
);
733 rc2
= ibmveth_open(dev
);
735 return rc1
? rc1
: rc2
;
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}
static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int rc = 0;

	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}
static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return adapter->rx_csum;
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
	.get_settings	= netdev_get_settings,
	.get_link	= netdev_get_link,
	.get_sg		= ethtool_op_get_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= ibmveth_set_tx_csum,
	.get_rx_csum	= ibmveth_get_rx_csum,
	.set_rx_csum	= ibmveth_set_rx_csum,
	.get_tso	= ethtool_op_get_tso,
	.get_ufo	= ethtool_op_get_ufo,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
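
/*
 * Transmit path: the linear part of the skb goes into desc[0] and each page
 * fragment into the following descriptors; all IbmVethMaxSendFrags
 * descriptors are then handed to firmware in a single h_send_logical_lan
 * call, retried while the hypervisor reports H_BUSY.
 */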
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;

	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		tx_dropped++;
		goto out;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if (nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid = 1;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		goto out;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

		desc[0].fields.no_csum = 1;
		desc[0].fields.csum_good = 1;

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

	if (dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while (curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
					 page_address(frag->page) + frag->page_offset,
					 frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid = 1;
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			desc[curfrag+1].fields.no_csum = 1;
			desc[curfrag+1].fields.csum_good = 1;
		}

		if (dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			tx_map_failed++;
			tx_dropped++;
			/* Free all the mappings we just created */
			while (curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			goto out;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc, desc[1].desc,
					     desc[2].desc, desc[3].desc,
					     desc[4].desc, desc[5].desc,
					     correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if (lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		int i;
		tx_send_failed++;
		tx_dropped++;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for (i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				 desc[nfrags].fields.address,
				 desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while (--nfrags >= 0);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_bytes += tx_bytes;
	adapter->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}
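
/*
 * NAPI poll: drain up to "budget" entries from the receive queue, recycling
 * any entry firmware marked invalid, replenish the buffer pools, then
 * re-enable the virtual interrupt and check once more for late arrivals
 * before completing.
 */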
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			adapter->stats.rx_packets++;
			adapter->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		netif_rx_complete(netdev, napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;

		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}

		/* add the addresses to the filter table */
		for (i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr) + 2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
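
/*
 * An MTU change only succeeds if some buffer pool can hold the new size plus
 * IBMVETH_BUFF_OH of overhead; inactive pools that would fit are switched on,
 * which forces a close/reopen so the new pool set is registered with firmware.
 */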
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int reinit = 0;
	int i, rc;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IbmVethNumBufferPools)
		return -EINVAL;

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 1;
			reinit = 1;
		}

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			if (reinit && netif_running(adapter->netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(adapter->netdev);
				adapter->pool_config = 0;
				dev->mtu = new_mtu;
				if ((rc = ibmveth_open(adapter->netdev)))
					return rc;
			} else
				dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(dev->priv);
	ibmveth_interrupt(dev->irq, dev);
}
#endif
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	long ret;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	union ibmveth_illan_attributes set_attr, ret_attr;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
						VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
	if (!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/* Some older boxes running PHYP non-natively have an OF that
	   returns a 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->open = ibmveth_open;
	netdev->stop = ibmveth_close;
	netdev->hard_start_xmit = ibmveth_start_xmit;
	netdev->get_stats = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl = ibmveth_ioctl;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	netdev->change_mtu = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ibmveth_poll_controller;
#endif
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		kobj->parent = &dev->dev.kobj;
		sprintf(kobj->name, "pool%d", i);
		kobj->ktype = &ktype_veth_pool;
		kobject_register(kobj);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr.desc);

	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
	    !ret_attr.fields.trunk_priority &&
	    ret_attr.fields.csum_offload_padded_pkt_support) {
		set_attr.desc = 0;
		set_attr.fields.tcp_csum_offload_ipv4 = 1;

		ret = h_illan_attributes(dev->unit_address, 0, set_attr.desc,
					 &ret_attr.desc);

		if (ret == H_SUCCESS) {
			adapter->rx_csum = 1;
			netdev->features |= NETIF_F_IP_CSUM;
		} else
			ret = h_illan_attributes(dev->unit_address, set_attr.desc,
						 0, &ret_attr.desc);
	}

	rc = register_netdev(netdev);

	if (rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	int i;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		kobject_unregister(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		return (void *)1;
	return NULL;
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char *) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char *) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "Current MAC:  %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  skbuffs linearized:     %ld\n", adapter->tx_linearized);
	seq_printf(seq, "       multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "       skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "       vio_map_single failures: %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:          %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:  %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:     %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:    %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:        %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:             %ld\n", adapter->rx_no_buffer);

	return 0;
}
static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};
static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}
static const struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ibmveth_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}
#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
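
/*
 * Each receive buffer pool is exposed as a kobject with "active", "num" and
 * "size" attributes.  Writes that change pool geometry close and reopen the
 * device (with pool_config set so the queue is not stopped) and finish by
 * kicking the interrupt handler to allocate or release buffers.
 */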
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev =
	    container_of(kobj->parent, struct device, kobj)->driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;

			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}

			pool->active = 0;
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
		.name = __stringify(_name), .mode = _mode, \
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static struct sysfs_ops veth_pool_ops = {
	.show  = veth_pool_show,
	.store = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.sysfs_ops     = &veth_pool_ops,
	.default_attrs = veth_pool_attrs,
};
static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
	.id_table = ibmveth_device_table,
	.probe    = ibmveth_probe,
	.remove   = ibmveth_remove,
	.driver   = {
		.name  = ibmveth_driver_name,
		.owner = THIS_MODULE,
	}
};
static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);