/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);
static struct pci_device_id c2_pci_table[] = {
	{ PCI_DEVICE(0x18b8, 0xb001) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);
static void c2_print_macaddr(struct net_device *netdev)
{
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
		 "IRQ %u\n", netdev->name,
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
		 netdev->irq);
}
static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}
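/*
 * Note: the adapter DMAs a "struct c2_rxp_hdr" in front of every received
 * frame, so the receive buffer must leave room for that header in addition
 * to the Ethernet header and MTU-sized payload; see c2_rx_interrupt(),
 * which skips past it before handing the skb to the stack.
 */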
/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_txp_ring)
{
	struct c2_tx_desc *tx_desc;
	struct c2_txp_desc __iomem *txp_desc;
	struct c2_element *elem;
	int i;

	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
	if (!tx_ring->start)
		return -ENOMEM;

	elem = tx_ring->start;
	tx_desc = vaddr;
	txp_desc = mmio_txp_ring;
	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
		tx_desc->len = 0;
		tx_desc->status = 0;

		/* Set TXP_HTXD_UNINIT */
		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
			     (void __iomem *) txp_desc + C2_TXP_ADDR);
		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
			     (void __iomem *) txp_desc + C2_TXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = tx_desc;
		elem->hw_desc = txp_desc;

		if (i == tx_ring->count - 1) {
			elem->next = tx_ring->start;
			tx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			tx_desc->next_offset =
			    base + (i + 1) * sizeof(*tx_desc);
		}
	}

	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

	return 0;
}
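/*
 * The host-side elements above form a circular singly linked list that
 * mirrors the adapter's descriptor queue one-to-one; next_offset holds
 * the DMA address of the following host descriptor so the adapter can
 * walk the ring, and the last element points back at the start.
 */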
/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_rxp_ring)
{
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_desc __iomem *rxp_desc;
	struct c2_element *elem;
	int i;

	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
	if (!rx_ring->start)
		return -ENOMEM;

	elem = rx_ring->start;
	rx_desc = vaddr;
	rxp_desc = mmio_rxp_ring;
	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
		rx_desc->len = 0;
		rx_desc->status = 0;

		/* Set RXP_HRXD_UNINIT */
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
			     (void __iomem *) rxp_desc + C2_RXP_STATUS);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = rx_desc;
		elem->hw_desc = rxp_desc;

		if (i == rx_ring->count - 1) {
			elem->next = rx_ring->start;
			rx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			rx_desc->next_offset =
			    base + (i + 1) * sizeof(*rx_desc);
		}
	}

	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

	return 0;
}
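/*
 * The descriptor fields are written with __raw_write*() plus an explicit
 * cpu_to_be*() conversion: the adapter expects big-endian values, and the
 * raw MMIO accessors avoid a second byte swap on little-endian hosts.
 */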
/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
	struct c2_rxp_hdr *rxp_hdr;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			 c2_port->netdev->name);
		return -ENOMEM;
	}

	/* Zero out the rxp hdr in the sk_buff */
	memset(skb->data, 0, sizeof(*rxp_hdr));

	skb->dev = c2_port->netdev;

	maplen = c2_port->rx_buf_size;
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen,
			   PCI_DMA_FROMDEVICE);

	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
	rxp_hdr->flags = RXP_HRXD_READY;

	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;
	rx_desc->len = maplen;

	return 0;
}
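/*
 * Note that the buffer length posted to the adapter via C2_RXP_LEN
 * excludes sizeof(struct c2_rxp_hdr): the header occupies the first
 * bytes of the mapped buffer, and only the remainder is available for
 * the received frame itself.
 */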
/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;
	return ret;
}
/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;

	elem = rx_ring->start;
	do {
		rx_desc = elem->ht_desc;
		rx_desc->len = 0;

		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     elem->hw_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     elem->hw_desc + C2_RXP_FLAGS);

		if (elem->skb) {
			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
					 elem->maplen, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(elem->skb);
			elem->skb = NULL;
		}
	} while ((elem = elem->next) != rx_ring->start);
}
static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
	struct c2_tx_desc *tx_desc = elem->ht_desc;

	tx_desc->len = 0;

	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
			 PCI_DMA_TODEVICE);

	if (elem->skb) {
		dev_kfree_skb_any(elem->skb);
		elem->skb = NULL;
	}

	return 0;
}
/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;
	int retry;
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;

	do {
		retry = 0;
		do {
			txp_htxd.flags =
			    readw(elem->hw_desc + C2_TXP_FLAGS);

			if (txp_htxd.flags == TXP_HTXD_READY) {
				retry = 1;
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(0,
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
					     elem->hw_desc + C2_TXP_FLAGS);
				c2_port->netdev->stats.tx_dropped++;
				break;
			} else {
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
					     elem->hw_desc + C2_TXP_FLAGS);
			}

			c2_tx_free(c2_port->c2dev, elem);

		} while ((elem = elem->next) != tx_ring->start);
	} while (retry);

	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}
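/*
 * Any descriptor still marked TXP_HTXD_READY above was handed to the
 * adapter but never transmitted: it is forced to TXP_HTXD_DONE, counted
 * as a drop, and the ring is rescanned (retry) until no READY entries
 * remain before the ring counters are reset.
 */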
/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;

	spin_lock(&c2_port->tx_lock);

	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		txp_htxd.flags =
		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

		if (txp_htxd.flags != TXP_HTXD_DONE)
			break;

		if (netif_msg_tx_done(c2_port)) {
			/* PCI reads are expensive in fast path */
			txp_htxd.len =
			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
			pr_debug("%s: tx done slot %3Zu status 0x%x len "
				 "%5u bytes\n",
				 netdev->name, elem - tx_ring->start,
				 txp_htxd.flags, txp_htxd.len);
		}

		c2_tx_free(c2dev, elem);
		++(c2_port->tx_avail);
	}

	tx_ring->to_clean = elem;

	if (netif_queue_stopped(netdev)
	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);
}
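/*
 * The transmit queue is only restarted once at least MAX_SKB_FRAGS + 1
 * slots are free, i.e. enough for a maximally fragmented skb plus its
 * linear head, so a freshly woken queue cannot immediately hit the
 * "ring full" path in c2_xmit_frame().
 */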
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			 elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			 (void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* Setup the skb for reuse since we're dropping this pkt */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* Zero out the rxp hdr in the sk_buff */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* Write the descriptor to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
		     elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netdev->stats.rx_dropped++;
}
static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * DMA ring.
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising of the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx().  The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}
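/*
 * Receive is zero-copy: instead of copying the payload, the ring slot is
 * refilled with a freshly mapped skb (c2_rx_alloc) and the old skb is
 * passed up the stack; if the refill fails, the old buffer is recycled
 * via c2_rx_error() and the frame is dropped.
 */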
/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
	unsigned int netisr0, dmaisr;
	int handled = 0;
	struct c2_dev *c2dev = (struct c2_dev *) dev_id;

	/* Process CCILNET interrupts */
	netisr0 = readl(c2dev->regs + C2_NISR0);
	if (netisr0) {

		/*
		 * There is an issue with the firmware that always
		 * provides the status of RX for both TX & RX
		 * interrupts.  So process both queues here.
		 */
		c2_rx_interrupt(c2dev->netdev);
		c2_tx_interrupt(c2dev->netdev);

		/* Clear the interrupt */
		writel(netisr0, c2dev->regs + C2_NISR0);
		handled++;
	}

	/* Process RNIC interrupts */
	dmaisr = readl(c2dev->regs + C2_DISR);
	if (dmaisr) {
		writel(dmaisr, c2dev->regs + C2_DISR);
		c2_rnic_interrupt(c2dev);
		handled++;
	}

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			 "host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

 bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

 bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}
static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			 netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */

	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}
static void c2_reset(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned int cur_rx = c2dev->cur_rx;

	/* Tell the hardware to quiesce */
	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

	/*
	 * The hardware will reset the C2_PCI_HRX_QUI bit once
	 * the RXP is quiesced.  Wait 2 seconds for this.
	 */
	ssleep(2);

	cur_rx = C2_GET_CUR_RX(c2dev);

	if (cur_rx & C2_PCI_HRX_QUI)
		pr_debug("c2_reset: failed to quiesce the hardware!\n");

	cur_rx &= ~C2_PCI_HRX_QUI;

	c2dev->cur_rx = cur_rx;

	pr_debug("Current RX: %u\n", c2dev->cur_rx);
}
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			 netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
		     elem->hw_desc + C2_TXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(maplen),
		     elem->hw_desc + C2_TXP_LEN);
	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
		     elem->hw_desc + C2_TXP_FLAGS);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew((__force u16) cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				 netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
static void c2_tx_timeout(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	if (netif_msg_timer(c2_port))
		pr_debug("%s: tx timeout\n", netdev->name);

	c2_tx_clean(c2_port);
}
static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		c2_down(netdev);
		c2_up(netdev);
	}

	return ret;
}
static const struct net_device_ops c2_netdev = {
	.ndo_open		= c2_up,
	.ndo_stop		= c2_down,
	.ndo_start_xmit		= c2_xmit_frame,
	.ndo_tx_timeout		= c2_tx_timeout,
	.ndo_change_mtu		= c2_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
				     void __iomem * mmio_addr)
{
	struct c2_port *c2_port = NULL;
	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

	if (!netdev) {
		pr_debug("c2_port etherdev alloc failed");
		return NULL;
	}

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	netdev->netdev_ops = &c2_netdev;
	netdev->watchdog_timeo = C2_TX_TIMEOUT;
	netdev->irq = c2dev->pcidev->irq;

	c2_port = netdev_priv(netdev);
	c2_port->netdev = netdev;
	c2_port->c2dev = c2dev;
	c2_port->msg_enable = netif_msg_init(debug, default_msg);
	c2_port->tx_ring.count = C2_NUM_TX_DESC;
	c2_port->rx_ring.count = C2_NUM_RX_DESC;

	spin_lock_init(&c2_port->tx_lock);

	/* Copy our 48-bit ethernet hardware address */
	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

	/* Validate the MAC address */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_debug("Invalid MAC Address\n");
		c2_print_macaddr(netdev);
		free_netdev(netdev);
		return NULL;
	}

	c2dev->netdev = netdev;

	return netdev;
}
static int __devinit c2_probe(struct pci_dev *pcidev,
			      const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
	       DRV_VERSION);

	/* Enable PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
		       pci_name(pcidev));
		goto bail0;
	}

	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure PCI base addr are MMIO */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Check for weird/broken PCI region reporting */
	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
		       pci_name(pcidev));
		goto bail1;
	}

	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enables bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (!mmio_regs) {
		printk(KERN_ERR PFX
		       "Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate PCI regs magic */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
			       "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
			       "utility to update your boot loader\n",
			       i + 1, sizeof(c2_magic),
			       readb(mmio_regs + C2_REGS_MAGIC + i),
			       c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
		       "[fw=%u, c2=%u], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
		       C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter IVN */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
		       C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
		       pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
	c2dev->cur_rx =
	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
		       pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the actual size prior to unmapping mmio_regs */
	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
		       ret);
		goto bail5;
	}

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (!c2dev->mmio_rxp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (!c2dev->mmio_txp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (!c2dev->regs) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (!c2dev->kva) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	if (c2_register_device(c2dev))
		goto bail10;

	return 0;

 bail10:
	iounmap(c2dev->kva);

 bail9:
	iounmap(c2dev->regs);

 bail8:
	iounmap(c2dev->mmio_txp_ring);

 bail7:
	iounmap(c2dev->mmio_rxp_ring);

 bail6:
	unregister_netdev(netdev);

 bail5:
	free_netdev(netdev);

 bail4:
	free_irq(pcidev->irq, c2dev);

 bail3:
	ib_dealloc_device(&c2dev->ibdev);

 bail2:
	pci_release_regions(pcidev);

 bail1:
	pci_disable_device(pcidev);

 bail0:
	return ret;
}
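/*
 * The bail0..bail10 labels above unwind the probe sequence in reverse
 * order, so each failure path releases exactly the resources acquired
 * before the point of failure and nothing more.
 */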
static void __devexit c2_remove(struct pci_dev *pcidev)
{
	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
	struct net_device *netdev = c2dev->netdev;

	/* Unregister with OpenIB */
	c2_unregister_device(c2dev);

	/* Clean up the RNIC resources */
	c2_rnic_term(c2dev);

	/* Remove network device from the kernel */
	unregister_netdev(netdev);

	/* Free network device */
	free_netdev(netdev);

	/* Free the interrupt line */
	free_irq(pcidev->irq, c2dev);

	/* missing: Turn LEDs off here */

	/* Unmap adapter PA space */
	iounmap(c2dev->kva);
	iounmap(c2dev->regs);
	iounmap(c2dev->mmio_txp_ring);
	iounmap(c2dev->mmio_rxp_ring);

	/* Free the hardware structure */
	ib_dealloc_device(&c2dev->ibdev);

	/* Release reserved PCI I/O and memory resources */
	pci_release_regions(pcidev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Clear driver specific data */
	pci_set_drvdata(pcidev, NULL);
}
static struct pci_driver c2_pci_driver = {
	.name = DRV_NAME,
	.id_table = c2_pci_table,
	.probe = c2_probe,
	.remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
	return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
	pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);