/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
        "Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
        "Use old large send method on firmware that supports the new method");
struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
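/*
 * The statistics table below ties each ethtool string to the offset of
 * a u64 counter inside struct ibmveth_adapter, so dumping the stats is
 * roughly, for each table entry:
 *
 *      data[i] = *(u64 *)((char *)adapter + ibmveth_stats[i].offset);
 *
 * Adding a counter only needs a struct field plus one table line.
 */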
struct ibmveth_stat ibmveth_stats[] = {
        { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
        { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
        { "replenish_add_buff_failure",
                        IBMVETH_STAT_OFF(replenish_add_buff_failure) },
        { "replenish_add_buff_success",
                        IBMVETH_STAT_OFF(replenish_add_buff_success) },
        { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
        { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
        { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
        { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
        { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
        { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
        { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
                        IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
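/*
 * How the helpers above parse the ring: each ibmveth_rx_q_entry packs a
 * toggle bit, a valid bit and the frame offset into the big-endian
 * flags_off word, with the length in a separate field. Ownership is
 * toggle-based rather than pointer-based: an entry belongs to the driver
 * while its toggle bit matches adapter->rx_queue.toggle, and the
 * expected toggle value is flipped each time the ring index wraps.
 */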
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
                                     u32 pool_index, u32 pool_size,
                                     u32 buff_size, u32 pool_active)
{
        pool->size = pool_size;
        pool->index = pool_index;
        pool->buff_size = buff_size;
        pool->threshold = pool_size * 7 / 8;
        pool->active = pool_active;
}
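/*
 * The 7/8 threshold above is the replenish trigger: a pool is refilled
 * by ibmveth_replenish_task() once the number of buffers available to
 * firmware drops below 7/8 of the pool size, e.g. below 448 buffers for
 * a 512-buffer pool.
 */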
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
        int i;

        pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

        if (!pool->free_map)
                return -1;

        pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
        if (!pool->dma_addr) {
                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

        if (!pool->skbuff) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;

                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

        for (i = 0; i < pool->size; ++i)
                pool->free_map[i] = i;

        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;

        return 0;
}
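/*
 * A pool is three parallel arrays indexed by buffer number: skbuff[] and
 * dma_addr[] describe buffers currently posted to firmware, while
 * free_map[] is a ring of free buffer indices (IBM_VETH_INVALID_MAP
 * marks a consumed slot).
 */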
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
        unsigned long offset;

        for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
                asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}
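/*
 * "dcbfl" is the PowerPC data cache block flush instruction; the loop
 * above walks the buffer one SMP_CACHE_BYTES-sized cache line at a
 * time. This is only used when the rx_flush module parameter is set.
 */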
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
                                          struct ibmveth_buff_pool *pool)
{
        u32 i;
        u32 count = pool->size - atomic_read(&pool->available);
        u32 buffers_added = 0;
        struct sk_buff *skb;
        unsigned int free_index, index;
        u64 correlator;
        unsigned long lpar_rc;
        dma_addr_t dma_addr;

        mb();

        for (i = 0; i < count; ++i) {
                union ibmveth_buf_desc desc;

                skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

                if (!skb) {
                        netdev_dbg(adapter->netdev,
                                   "replenish: unable to allocate skb\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                free_index = pool->consumer_index;
                pool->consumer_index++;
                if (pool->consumer_index >= pool->size)
                        pool->consumer_index = 0;
                index = pool->free_map[free_index];

                BUG_ON(index == IBM_VETH_INVALID_MAP);
                BUG_ON(pool->skbuff[index] != NULL);

                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto failure;

                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
                pool->skbuff[index] = skb;

                correlator = ((u64)pool->index << 32) | index;
                *(u64 *)skb->data = correlator;

                desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
                desc.fields.address = dma_addr;

                if (rx_flush) {
                        unsigned int len = min(pool->buff_size,
                                                adapter->netdev->mtu +
                                                IBMVETH_BUFF_OH);
                        ibmveth_flush_buffer(skb->data, len);
                }
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
                                                   desc.desc);

                if (lpar_rc != H_SUCCESS) {
                        goto failure;
                } else {
                        buffers_added++;
                        adapter->replenish_add_buff_success++;
                }
        }

        mb();
        atomic_add(buffers_added, &(pool->available));
        return;

failure:
        pool->free_map[free_index] = index;
        pool->skbuff[index] = NULL;
        if (pool->consumer_index == 0)
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
        if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                 pool->dma_addr[index], pool->buff_size,
                                 DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;

        mb();
        atomic_add(buffers_added, &(pool->available));
}
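/*
 * The correlator written into each posted buffer is the driver's cookie
 * for finding the buffer again when the hypervisor hands it back:
 *
 *      correlator = ((u64)pool->index << 32) | index;
 *
 * e.g. pool 2, buffer 5 encodes as 0x0000000200000005. The rx-side
 * helpers reverse this with ">> 32" and "& 0xffffffffUL".
 */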
/* The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
        __be64 *p = adapter->buffer_list_addr + 4096 - 8;

        adapter->rx_no_buffer = be64_to_cpup(p);
}
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;

        for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
                struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

                if (pool->active &&
                    (atomic_read(&pool->available) < pool->threshold))
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }

        ibmveth_update_rx_no_buffer(adapter);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
                                     struct ibmveth_buff_pool *pool)
{
        int i;

        kfree(pool->free_map);
        pool->free_map = NULL;

        if (pool->skbuff && pool->dma_addr) {
                for (i = 0; i < pool->size; ++i) {
                        struct sk_buff *skb = pool->skbuff[i];
                        if (skb) {
                                dma_unmap_single(&adapter->vdev->dev,
                                                 pool->dma_addr[i],
                                                 pool->buff_size,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                pool->skbuff[i] = NULL;
                        }
                }
        }

        if (pool->dma_addr) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;
        }

        if (pool->skbuff) {
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
                                            u64 correlator)
{
        unsigned int pool  = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        unsigned int free_index;
        struct sk_buff *skb;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        skb = adapter->rx_buff_pool[pool].skbuff[index];

        BUG_ON(skb == NULL);

        adapter->rx_buff_pool[pool].skbuff[index] = NULL;

        dma_unmap_single(&adapter->vdev->dev,
                         adapter->rx_buff_pool[pool].dma_addr[index],
                         adapter->rx_buff_pool[pool].buff_size,
                         DMA_FROM_DEVICE);

        free_index = adapter->rx_buff_pool[pool].producer_index;
        adapter->rx_buff_pool[pool].producer_index++;
        if (adapter->rx_buff_pool[pool].producer_index >=
            adapter->rx_buff_pool[pool].size)
                adapter->rx_buff_pool[pool].producer_index = 0;
        adapter->rx_buff_pool[pool].free_map[free_index] = index;

        mb();

        atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
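/*
 * free_map is used as a ring: consumer_index is where the replenish path
 * takes free buffer indices from, and producer_index (advanced above) is
 * where the indices of returned buffers are put back.
 */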
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
        u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
        u32 q_index = adapter->rx_queue.index;
        u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        union ibmveth_buf_desc desc;
        unsigned long lpar_rc;
        int ret = 1;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        if (!adapter->rx_buff_pool[pool].active) {
                ibmveth_rxq_harvest_buffer(adapter);
                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
                goto out;
        }

        desc.fields.flags_len = IBMVETH_BUF_VALID |
                adapter->rx_buff_pool[pool].buff_size;
        desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

        lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

        if (lpar_rc != H_SUCCESS) {
                netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
                           "during recycle rc=%ld", lpar_rc);
                ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
                ret = 0;
        }

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }

out:
        return ret;
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
        ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
        int i;
        struct device *dev = &adapter->vdev->dev;

        if (adapter->buffer_list_addr != NULL) {
                if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
                        dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->buffer_list_addr);
                adapter->buffer_list_addr = NULL;
        }

        if (adapter->filter_list_addr != NULL) {
                if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
                        dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->filter_list_addr);
                adapter->filter_list_addr = NULL;
        }

        if (adapter->rx_queue.queue_addr != NULL) {
                dma_free_coherent(dev, adapter->rx_queue.queue_len,
                                  adapter->rx_queue.queue_addr,
                                  adapter->rx_queue.queue_dma);
                adapter->rx_queue.queue_addr = NULL;
        }

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);

        if (adapter->bounce_buffer != NULL) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                        adapter->bounce_buffer_dma,
                                        adapter->netdev->mtu + IBMVETH_BUFF_OH,
                                        DMA_BIDIRECTIONAL);
                        adapter->bounce_buffer_dma = DMA_ERROR_CODE;
                }
                kfree(adapter->bounce_buffer);
                adapter->bounce_buffer = NULL;
        }
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
        int rc, try_again = 1;

        /*
         * After a kexec the adapter will still be open, so our attempt to
         * open it will fail. So if we get a failure we free the adapter and
         * try again, but only once.
         */
retry:
        rc = h_register_logical_lan(adapter->vdev->unit_address,
                                    adapter->buffer_list_dma, rxq_desc.desc,
                                    adapter->filter_list_dma, mac_address);

        if (rc != H_SUCCESS && try_again) {
                do {
                        rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

                try_again = 0;
                goto retry;
        }

        return rc;
}
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
        int i;
        u64 encoded = 0;

        for (i = 0; i < ETH_ALEN; i++)
                encoded = (encoded << 8) | mac[i];

        return encoded;
}
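/*
 * The encoding above packs the 6-byte Ethernet address into the low 48
 * bits of a u64, as the hypervisor interface expects; for example
 * 02:03:04:05:06:07 becomes 0x020304050607.
 */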
static int ibmveth_open(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        u64 mac_address;
        int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
        struct device *dev;

        netdev_dbg(netdev, "open starting\n");

        napi_enable(&adapter->napi);

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                rxq_entries += adapter->rx_buff_pool[i].size;

        adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
        adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

        if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
                netdev_err(netdev, "unable to allocate filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
                goto err_out;
        }

        dev = &adapter->vdev->dev;

        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
                                                rxq_entries;
        adapter->rx_queue.queue_addr =
                dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
                                   &adapter->rx_queue.queue_dma, GFP_KERNEL);
        if (!adapter->rx_queue.queue_addr) {
                rc = -ENOMEM;
                goto err_out;
        }

        adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

        if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
            (dma_mapping_error(dev, adapter->filter_list_dma))) {
                netdev_err(netdev, "unable to map filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
                goto err_out;
        }

        adapter->rx_queue.index = 0;
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;

        mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

        rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
                                        adapter->rx_queue.queue_len;
        rxq_desc.fields.address = adapter->rx_queue.queue_dma;

        netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
        netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
        netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
                           lpar_rc);
                netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
                           "desc:0x%llx MAC:0x%llx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
                                     mac_address);
                rc = -ENONET;
                goto err_out;
        }

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                if (!adapter->rx_buff_pool[i].active)
                        continue;
                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
                        netdev_err(netdev, "unable to alloc pool\n");
                        adapter->rx_buff_pool[i].active = 0;
                        rc = -ENOMEM;
                        goto err_out;
                }
        }

        netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
        rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
                         netdev);
        if (rc != 0) {
                netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                           netdev->irq, rc);
                do {
                        lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

                goto err_out;
        }

        adapter->bounce_buffer =
            kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
        if (!adapter->bounce_buffer) {
                rc = -ENOMEM;
                goto err_out_free_irq;
        }
        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                netdev_err(netdev, "unable to map bounce buffer\n");
                rc = -ENOMEM;
                goto err_out_free_irq;
        }

        netdev_dbg(netdev, "initial replenish cycle\n");
        ibmveth_interrupt(netdev->irq, netdev);

        netif_start_queue(netdev);

        netdev_dbg(netdev, "open complete\n");

        return 0;

err_out_free_irq:
        free_irq(netdev->irq, netdev);
err_out:
        ibmveth_cleanup(adapter);
        napi_disable(&adapter->napi);
        return rc;
}
static int ibmveth_close(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        long lpar_rc;

        netdev_dbg(netdev, "close starting\n");

        napi_disable(&adapter->napi);

        if (!adapter->pool_config)
                netif_stop_queue(netdev);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_free_logical_lan failed with %lx, "
                           "continuing with close\n", lpar_rc);
        }

        free_irq(netdev->irq, netdev);

        ibmveth_update_rx_no_buffer(adapter);

        ibmveth_cleanup(adapter);

        netdev_dbg(netdev, "close complete\n");

        return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                                SUPPORTED_FIBRE);
        cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                                ADVERTISED_FIBRE);
        ethtool_cmd_speed_set(cmd, SPEED_1000);
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_FIBRE;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_ENABLE;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 1;

        return 0;
}
static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
        strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        /*
         * Since the ibmveth firmware interface does not have the
         * concept of separate tx/rx checksum offload enable, if rx
         * checksum is disabled we also have to disable tx checksum
         * offload. Once we disable rx checksum offload, we are no
         * longer allowed to send tx buffers that are not properly
         * checksummed.
         */

        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_CSUM_MASK;

        return features;
}
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        unsigned long set_attr6, clr_attr6;
        long ret, ret4, ret6;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;
        set_attr6 = 0;
        clr_attr6 = 0;

        if (data) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        } else {
                clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        }

        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
                ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);

                if (ret4 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv4 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret4);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IP_CSUM;

                } else {
                        adapter->fw_ipv4_csum_support = data;
                }

                ret6 = h_illan_attributes(adapter->vdev->unit_address,
                                         clr_attr6, set_attr6, &ret_attr);

                if (ret6 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv6 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret6);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr6, clr_attr6, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IPV6_CSUM;

                } else
                        adapter->fw_ipv6_csum_support = data;

                if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
                        adapter->rx_csum = data;
                else
                        rc1 = -EIO;
        } else {
                rc1 = -EIO;
                netdev_err(dev, "unable to change checksum offload settings."
                                     " %d rc=%ld ret_attr=%lx\n", data, ret,
                                     ret_attr);
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}
static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        long ret1, ret2;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;

        if (data)
                set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
        else
                clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

        ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
            !old_large_send) {
                ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                          set_attr, &ret_attr);

                if (ret2 != H_SUCCESS) {
                        netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
                                   data, ret2);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
                        rc1 = -EIO;

                } else {
                        adapter->fw_large_send_support = data;
                        adapter->large_send = data;
                }
        } else {
                /* Older firmware version of large send offload does not
                 * support tcp6/ipv6
                 */
                if (data == 1) {
                        dev->features &= ~NETIF_F_TSO6;
                        netdev_info(dev, "TSO feature requires all partitions to have updated driver");
                }
                adapter->large_send = data;
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}
static int ibmveth_set_features(struct net_device *dev,
        netdev_features_t features)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
        int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
        int rc1 = 0, rc2 = 0;

        if (rx_csum != adapter->rx_csum) {
                rc1 = ibmveth_set_csum_offload(dev, rx_csum);
                if (rc1 && !adapter->rx_csum)
                        dev->features =
                                features & ~(NETIF_F_CSUM_MASK |
                                             NETIF_F_RXCSUM);
        }

        if (large_send != adapter->large_send) {
                rc2 = ibmveth_set_tso(dev, large_send);
                if (rc2 && !adapter->large_send)
                        dev->features =
                                features & ~(NETIF_F_TSO | NETIF_F_TSO6);
        }

        return rc1 ? rc1 : rc2;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
                memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmveth_stats);
        default:
                return -EOPNOTSUPP;
        }
}
static void ibmveth_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        int i;
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
                data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_settings           = netdev_get_settings,
        .get_link               = ethtool_op_get_link,
        .get_strings            = ibmveth_get_strings,
        .get_sset_count         = ibmveth_get_sset_count,
        .get_ethtool_stats      = ibmveth_get_ethtool_stats,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
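/*
 * page_offset() yields the offset of an address within its 4KB page
 * (the low 12 bits), e.g. page_offset(0x12345) == 0x345.
 */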
static int ibmveth_send(struct ibmveth_adapter *adapter,
                        union ibmveth_buf_desc *descs, unsigned long mss)
{
        unsigned long correlator;
        unsigned int retry_count;
        unsigned long ret;

        /*
         * The retry count sets a maximum for the number of broadcast and
         * multicast destinations within the system.
         */
        retry_count = 1024;
        correlator = 0;
        do {
                ret = h_send_logical_lan(adapter->vdev->unit_address,
                                         descs[0].desc, descs[1].desc,
                                         descs[2].desc, descs[3].desc,
                                         descs[4].desc, descs[5].desc,
                                         correlator, &correlator, mss,
                                         adapter->fw_large_send_support);
        } while ((ret == H_BUSY) && (retry_count--));

        if (ret != H_SUCCESS && ret != H_DROPPED) {
                netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
                           "with rc=%ld\n", ret);
                return 1;
        }

        return 0;
}
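/*
 * Note: h_send_logical_lan is retried while the hypervisor returns
 * H_BUSY, bounded by retry_count; the correlator returned by one
 * attempt is passed back in to the next.
 */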
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned int desc_flags;
        union ibmveth_buf_desc descs[6];
        int last, i;
        int force_bounce = 0;
        dma_addr_t dma_addr;
        unsigned long mss = 0;

        /*
         * veth handles a maximum of 6 segments including the header, so
         * we have to linearize the skb if there are more than this.
         */
        if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }

        /* veth can't checksum offload UDP */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            ((skb->protocol == htons(ETH_P_IP) &&
              ip_hdr(skb)->protocol != IPPROTO_TCP) ||
             (skb->protocol == htons(ETH_P_IPV6) &&
              ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
            skb_checksum_help(skb)) {

                netdev_err(netdev, "tx: failed to checksum packet\n");
                netdev->stats.tx_dropped++;
                goto out;
        }

        desc_flags = IBMVETH_BUF_VALID;

        if (skb_is_gso(skb) && adapter->fw_large_send_support)
                desc_flags |= IBMVETH_BUF_LRG_SND;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) +
                                                skb->csum_offset;

                desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

                /* Need to zero out the checksum */
                buf[0] = 0;
                buf[1] = 0;
        }

retry_bounce:
        memset(descs, 0, sizeof(descs));

        /*
         * If a linear packet is below the rx threshold then
         * copy it into the static bounce buffer. This avoids the
         * cost of a TCE insert and remove.
         */
        if (force_bounce || (!skb_is_nonlinear(skb) &&
                                (skb->len < tx_copybreak))) {
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
                                          skb->len);

                descs[0].fields.flags_len = desc_flags | skb->len;
                descs[0].fields.address = adapter->bounce_buffer_dma;

                if (ibmveth_send(adapter, descs, 0)) {
                        adapter->tx_send_failed++;
                        netdev->stats.tx_dropped++;
                } else {
                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += skb->len;
                }

                goto out;
        }

        /* Map the header */
        dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                  skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                goto map_failed;

        descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
        descs[0].fields.address = dma_addr;

        /* Map the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
                                            skb_frag_size(frag), DMA_TO_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto map_failed_frags;

                descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
                descs[i+1].fields.address = dma_addr;
        }

        if (skb_is_gso(skb)) {
                if (adapter->fw_large_send_support) {
                        mss = (unsigned long)skb_shinfo(skb)->gso_size;
                        adapter->tx_large_packets++;
                } else if (!skb_is_gso_v6(skb)) {
                        /* Put -1 in the IP checksum to tell phyp it
                         * is a largesend packet. Put the mss in
                         * the TCP checksum.
                         */
                        ip_hdr(skb)->check = 0xffff;
                        tcp_hdr(skb)->check =
                                cpu_to_be16(skb_shinfo(skb)->gso_size);
                        adapter->tx_large_packets++;
                }
        }

        if (ibmveth_send(adapter, descs, mss)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
        }

        dma_unmap_single(&adapter->vdev->dev,
                         descs[0].fields.address,
                         descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                         DMA_TO_DEVICE);

        for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

out:
        dev_consume_skb_any(skb);
        return NETDEV_TX_OK;

map_failed_frags:
        last = i+1;
        for (i = 0; i < last; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

map_failed:
        if (!firmware_has_feature(FW_FEATURE_CMO))
                netdev_err(netdev, "tx: unable to map xmit buffer\n");
        adapter->tx_map_failed++;
        skb_linearize(skb);
        force_bounce = 1;
        goto retry_bounce;
}
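/*
 * Transmit strategy, in order of preference: (1) small linear skbs are
 * copied into the pre-mapped bounce buffer (tx_copybreak), avoiding a
 * TCE map/unmap; (2) otherwise the header and up to 5 fragments are
 * DMA-mapped into the 6 available descriptors; (3) if mapping fails
 * (e.g. under CMO entitlement pressure), the skb is linearized and
 * forced through the bounce buffer as a last resort.
 */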
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
        struct ibmveth_adapter *adapter =
                        container_of(napi, struct ibmveth_adapter, napi);
        struct net_device *netdev = adapter->netdev;
        int frames_processed = 0;
        unsigned long lpar_rc;
        struct iphdr *iph;

restart_poll:
        while (frames_processed < budget) {
                if (!ibmveth_rxq_pending_buffer(adapter))
                        break;

                smp_rmb();
                if (!ibmveth_rxq_buffer_valid(adapter)) {
                        wmb(); /* suggested by larson1 */
                        adapter->rx_invalid_buffer++;
                        netdev_dbg(netdev, "recycling invalid buffer\n");
                        ibmveth_rxq_recycle_buffer(adapter);
                } else {
                        struct sk_buff *skb, *new_skb;
                        int length = ibmveth_rxq_frame_length(adapter);
                        int offset = ibmveth_rxq_frame_offset(adapter);
                        int csum_good = ibmveth_rxq_csum_good(adapter);

                        skb = ibmveth_rxq_get_buffer(adapter);

                        new_skb = NULL;
                        if (length < rx_copybreak)
                                new_skb = netdev_alloc_skb(netdev, length);

                        if (new_skb) {
                                skb_copy_to_linear_data(new_skb,
                                                        skb->data + offset,
                                                        length);
                                if (rx_flush)
                                        ibmveth_flush_buffer(skb->data,
                                                length + offset);
                                if (!ibmveth_rxq_recycle_buffer(adapter))
                                        kfree_skb(skb);
                                skb = new_skb;
                        } else {
                                ibmveth_rxq_harvest_buffer(adapter);
                                skb_reserve(skb, offset);
                        }

                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, netdev);

                        if (csum_good) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
                                        iph = (struct iphdr *)skb->data;

                                        /* If the IP checksum is not offloaded and if the packet
                                         * is large send, the checksum must be rebuilt.
                                         */
                                        if (iph->check == 0xffff) {
                                                iph->check = 0;
                                                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
                                                adapter->rx_large_packets++;
                                        }
                                }
                        }

                        napi_gro_receive(napi, skb);    /* send it up */

                        netdev->stats.rx_packets++;
                        netdev->stats.rx_bytes += length;
                        frames_processed++;
                }
        }

        ibmveth_replenish_task(adapter);

        if (frames_processed < budget) {
                napi_complete(napi);

                /* We think we are done - reenable interrupts,
                 * then check once more to make sure we are done.
                 */
                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                       VIO_IRQ_ENABLE);

                BUG_ON(lpar_rc != H_SUCCESS);

                if (ibmveth_rxq_pending_buffer(adapter) &&
                    napi_reschedule(napi)) {
                        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                               VIO_IRQ_DISABLE);
                        goto restart_poll;
                }
        }

        return frames_processed;
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
        struct net_device *netdev = dev_instance;
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned long lpar_rc;

        if (napi_schedule_prep(&adapter->napi)) {
                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                       VIO_IRQ_DISABLE);
                BUG_ON(lpar_rc != H_SUCCESS);
                __napi_schedule(&adapter->napi);
        }
        return IRQ_HANDLED;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned long lpar_rc;

        if ((netdev->flags & IFF_PROMISC) ||
            (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableRecv |
                                           IbmVethMcastDisableFiltering,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
                                   "entering promisc mode\n", lpar_rc);
                }
        } else {
                struct netdev_hw_addr *ha;
                /* clear the filter table & disable filtering */
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableRecv |
                                           IbmVethMcastDisableFiltering |
                                           IbmVethMcastClearFilterTable,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
                                   "attempting to clear filter table\n",
                                   lpar_rc);
                }
                /* add the addresses to the filter table */
                netdev_for_each_mc_addr(ha, netdev) {
                        /* add the multicast address to the filter table */
                        u64 mcast_addr;
                        mcast_addr = ibmveth_encode_mac_addr(ha->addr);
                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                                   IbmVethMcastAddFilter,
                                                   mcast_addr);
                        if (lpar_rc != H_SUCCESS) {
                                netdev_err(netdev, "h_multicast_ctrl rc=%ld "
                                           "when adding an entry to the filter "
                                           "table\n", lpar_rc);
                        }
                }

                /* re-enable filtering */
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableFiltering,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
                                   "enabling filtering\n", lpar_rc);
                }
        }
}
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        struct vio_dev *viodev = adapter->vdev;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
        int i, rc;
        int need_restart = 0;

        if (new_mtu < IBMVETH_MIN_MTU)
                return -EINVAL;

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
                        break;

        if (i == IBMVETH_NUM_BUFF_POOLS)
                return -EINVAL;

        /* Deactivate all the buffer pools so that the next loop can activate
           only the buffer pools necessary to hold the new MTU */
        if (netif_running(adapter->netdev)) {
                need_restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(adapter->netdev);
                adapter->pool_config = 0;
        }

        /* Look for an active buffer pool that can hold the new MTU */
        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                adapter->rx_buff_pool[i].active = 1;

                if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
                        dev->mtu = new_mtu;
                        vio_cmo_set_dev_desired(viodev,
                                                ibmveth_get_desired_dma
                                                (viodev));
                        if (need_restart) {
                                return ibmveth_open(adapter->netdev);
                        }
                        return 0;
                }
        }

        if (need_restart && (rc = ibmveth_open(adapter->netdev)))
                return rc;

        return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
        ibmveth_replenish_task(netdev_priv(dev));
        ibmveth_interrupt(dev->irq, dev);
}
#endif
/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
        struct net_device *netdev = dev_get_drvdata(&vdev->dev);
        struct ibmveth_adapter *adapter;
        struct iommu_table *tbl;
        unsigned long ret;
        int i;
        int rxqentries = 1;

        tbl = get_iommu_table_base(&vdev->dev);

        /* netdev inits at probe time along with the structures we need below*/
        if (netdev == NULL)
                return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

        adapter = netdev_priv(netdev);

        ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
        ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                /* add the size of the active receive buffers */
                if (adapter->rx_buff_pool[i].active)
                        ret +=
                            adapter->rx_buff_pool[i].size *
                            IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
                                             buff_size, tbl);
                rxqentries += adapter->rx_buff_pool[i].size;
        }
        /* add the size of the receive queue entries */
        ret += IOMMU_PAGE_ALIGN(
                rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

        return ret;
}
static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        struct sockaddr *addr = p;
        u64 mac_address;
        int rc;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        mac_address = ibmveth_encode_mac_addr(addr->sa_data);
        rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
        if (rc) {
                netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
                return rc;
        }

        ether_addr_copy(dev->dev_addr, addr->sa_data);

        return 0;
}
static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_open               = ibmveth_open,
        .ndo_stop               = ibmveth_close,
        .ndo_start_xmit         = ibmveth_start_xmit,
        .ndo_set_rx_mode        = ibmveth_set_multicast_list,
        .ndo_do_ioctl           = ibmveth_ioctl,
        .ndo_change_mtu         = ibmveth_change_mtu,
        .ndo_fix_features       = ibmveth_fix_features,
        .ndo_set_features       = ibmveth_set_features,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ibmveth_poll_controller,
#endif
};
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
        int rc, i, mac_len;
        struct net_device *netdev;
        struct ibmveth_adapter *adapter;
        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;
        long ret;
        unsigned long ret_attr;

        dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
                dev->unit_address);

        mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
                                                        &mac_len);
        if (!mac_addr_p) {
                dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
                return -EINVAL;
        }
        /* Workaround for old/broken pHyp */
        if (mac_len == 8)
                mac_addr_p += 2;
        else if (mac_len != 6) {
                dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
                        mac_len);
                return -EINVAL;
        }

        mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
                                                VETH_MCAST_FILTER_SIZE, NULL);
        if (!mcastFilterSize_p) {
                dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
                        "attribute\n");
                return -EINVAL;
        }

        netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

        if (!netdev)
                return -ENOMEM;

        adapter = netdev_priv(netdev);
        dev_set_drvdata(&dev->dev, netdev);

        adapter->vdev = dev;
        adapter->netdev = netdev;
        adapter->mcastFilterSize = *mcastFilterSize_p;
        adapter->pool_config = 0;

        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmveth_netdev_ops;
        netdev->ethtool_ops = &netdev_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
        netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->features |= netdev->hw_features;

        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        /* If running older firmware, TSO should not be enabled by default */
        if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
            !old_large_send) {
                netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
                netdev->features |= netdev->hw_features;
        } else {
                netdev->hw_features |= NETIF_F_TSO;
        }

        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

        if (firmware_has_feature(FW_FEATURE_CMO))
                memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
                int error;

                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
                                         pool_count[i], pool_size[i],
                                         pool_active[i]);
                error = kobject_init_and_add(kobj, &ktype_veth_pool,
                                             &dev->dev.kobj, "pool%d", i);
                if (!error)
                        kobject_uevent(kobj, KOBJ_ADD);
        }

        netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

        adapter->buffer_list_dma = DMA_ERROR_CODE;
        adapter->filter_list_dma = DMA_ERROR_CODE;
        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

        netdev_dbg(netdev, "registering netdev...\n");

        ibmveth_set_features(netdev, netdev->features);

        rc = register_netdev(netdev);

        if (rc) {
                netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
                free_netdev(netdev);
                return rc;
        }

        netdev_dbg(netdev, "registered\n");

        return 0;
}
static int ibmveth_remove(struct vio_dev *dev)
{
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        int i;

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                kobject_put(&adapter->rx_buff_pool[i].kobj);

        unregister_netdev(netdev);

        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);

        return 0;
}
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct ibmveth_buff_pool *pool = container_of(kobj,
                                                      struct ibmveth_buff_pool,
                                                      kobj);

        if (attr == &veth_active_attr)
                return sprintf(buf, "%d\n", pool->active);
        else if (attr == &veth_num_attr)
                return sprintf(buf, "%d\n", pool->size);
        else if (attr == &veth_size_attr)
                return sprintf(buf, "%d\n", pool->buff_size);
        return 0;
}
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t count)
{
        struct ibmveth_buff_pool *pool = container_of(kobj,
                                                      struct ibmveth_buff_pool,
                                                      kobj);
        struct net_device *netdev = dev_get_drvdata(
            container_of(kobj->parent, struct device, kobj));
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        long value = simple_strtol(buf, NULL, 10);
        long rc;

        if (attr == &veth_active_attr) {
                if (value && !pool->active) {
                        if (netif_running(netdev)) {
                                if (ibmveth_alloc_buffer_pool(pool)) {
                                        netdev_err(netdev,
                                                   "unable to alloc pool\n");
                                        return -ENOMEM;
                                }
                                pool->active = 1;
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else {
                                pool->active = 1;
                        }
                } else if (!value && pool->active) {
                        int mtu = netdev->mtu + IBMVETH_BUFF_OH;
                        int i;
                        /* Make sure there is a buffer pool with buffers that
                           can hold a packet of the size of the MTU */
                        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                                if (pool == &adapter->rx_buff_pool[i])
                                        continue;
                                if (!adapter->rx_buff_pool[i].active)
                                        continue;
                                if (mtu <= adapter->rx_buff_pool[i].buff_size)
                                        break;
                        }

                        if (i == IBMVETH_NUM_BUFF_POOLS) {
                                netdev_err(netdev, "no active pool >= MTU\n");
                                return -EPERM;
                        }

                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                pool->active = 0;
                                adapter->pool_config = 0;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        }
                        pool->active = 0;
                }
        } else if (attr == &veth_num_attr) {
                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
                        return -EINVAL;
                } else {
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                pool->size = value;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else {
                                pool->size = value;
                        }
                }
        } else if (attr == &veth_size_attr) {
                if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
                        return -EINVAL;
                } else {
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                pool->buff_size = value;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else {
                                pool->buff_size = value;
                        }
                }
        }

        /* kick the interrupt handler to allocate/deallocate pools */
        ibmveth_interrupt(netdev->irq, netdev);
        return count;
}
#define ATTR(_name, _mode)                              \
        struct attribute veth_##_name##_attr = {        \
        .name = __stringify(_name), .mode = _mode,      \
        };

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
static struct attribute *veth_pool_attrs[] = {
        &veth_active_attr,
        &veth_num_attr,
        &veth_size_attr,
        NULL,
};
static const struct sysfs_ops veth_pool_ops = {
        .show   = veth_pool_show,
        .store  = veth_pool_store,
};
static struct kobj_type ktype_veth_pool = {
        .release        = NULL,
        .sysfs_ops      = &veth_pool_ops,
        .default_attrs  = veth_pool_attrs,
};
static int ibmveth_resume(struct device *dev)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        ibmveth_interrupt(netdev->irq, netdev);
        return 0;
}
static struct vio_device_id ibmveth_device_table[] = {
        { "network", "IBM,l-lan"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct dev_pm_ops ibmveth_pm_ops = {
        .resume = ibmveth_resume
};
static struct vio_driver ibmveth_driver = {
        .id_table       = ibmveth_device_table,
        .probe          = ibmveth_probe,
        .remove         = ibmveth_remove,
        .get_desired_dma = ibmveth_get_desired_dma,
        .name           = ibmveth_driver_name,
        .pm             = &ibmveth_pm_ops,
};
static int __init ibmveth_module_init(void)
{
        printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
               ibmveth_driver_string, ibmveth_driver_version);

        return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
        vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);