/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel.  It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * Fix TSO; tx performance is horrible with TSO enabled.
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 * interrupt coalescing
 */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/irqreturn.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/dma-mapping.h>
#include <linux/net.h>
#include <linux/tcp.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#define DRIVER_VERSION "2.0.7"

char atl1_driver_name[] = "atl1";
static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
char atl1_driver_version[] = DRIVER_VERSION;

MODULE_AUTHOR("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rx_timer = 1;		/* about 2us */
	hw->cmb_tx_timer = 1;		/* about 2us */
	hw->smb_timer = 100000;		/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
/*
 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
		goto err_nomem;
	}
	rfd_ring->buffer_info =
		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

	/* real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	memset(ring_header->desc, 0, ring_header->size);

	/* init TPD ring */
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

	/* init RRD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

	/* init CMB */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return ATL1_SUCCESS;

err_nomem:
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
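/*
 * For reference, the 40-byte slack above comes from the five regions packed
 * into the single coherent allocation (TPD ring, RFD ring, RRD ring, CMB and
 * SMB): each region may be pushed up to the next 8-byte boundary, so the
 * worst-case padding is 5 * 8 = 40 bytes.
 */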
void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
 * atl1_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void atl1_irq_enable(struct atl1_adapter *adapter)
{
	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
	ioread32(adapter->hw.hw_addr + REG_IMR);
}
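/*
 * The ioread32() in atl1_irq_enable() reads the IMR straight back after
 * writing it; the intent is presumably to flush the posted PCI write so the
 * new interrupt mask has actually reached the device before the caller
 * proceeds.
 */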
static void atl1_clear_phy_int(struct atl1_adapter *adapter)
{
	u16 phy_data;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
	spin_unlock_irqrestore(&adapter->lock, flags);
}
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
	adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
	adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
	adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
	adapter->net_stats.multicast = adapter->soft_stats.multicast;
	adapter->net_stats.collisions = adapter->soft_stats.collisions;
	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
	adapter->net_stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	adapter->net_stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	adapter->net_stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	adapter->net_stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	adapter->net_stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	adapter->net_stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;
	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			dev_printk(KERN_DEBUG, &pdev->dev,
				"rx checksum error\n");
			return;
		}
	}

	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}

	/* IPv4, but hardware thinks its checksum is wrong */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
		rrd->pkt_flg, rrd->err_flg);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
	adapter->hw_csum_err++;
	return;
}
/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
		if (unlikely(!skb)) {	/* Better luck next round */
			adapter->net_stats.rx_dropped++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
						adapter->rx_buffer_len,
						PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}
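/*
 * NET_IP_ALIGN is 2 on most architectures, so the skb_reserve() above shifts
 * the buffer by two bytes; once the 14-byte Ethernet header is stripped, the
 * IP header ends up at offset 2 + 14 = 16, i.e. 16-byte aligned, which is
 * what the "2 beyond a 16 byte boundary" comment is describing.
 */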
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}
static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;

	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}
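/*
 * The num_buf computation in atl1_update_rfd_index() is a plain ceiling
 * division: for example, with pkt_size = 3000 and rx_buffer_len = 1520,
 * (3000 + 1520 - 1) / 1520 = 2, i.e. the frame consumed two RFD buffers.
 */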
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"incomplete RRD DMA transfer\n");
				goto chk_rrd;
			}
			/* bad rrd */
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */
			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		skb_put(skb, length - ETHERNET_FCS_SIZE);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;
		u32 rrd_next_to_clean;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
			atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		tpd->buffer_addr = 0;

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev)
	    && netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}
static void atl1_check_for_link(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;

	spin_lock(&adapter->lock);
	adapter->phy_timer_pending = false;
	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->lock);

	/* notify upper layer link down ASAP */
	if (!(phy_data & BMSR_LSTATUS)) {	/* Link Down */
		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
			dev_info(&adapter->pdev->dev, "%s link is down\n",
				netdev->name);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}
/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atl1_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {	/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ? */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie DMA r/w error (status = 0x%x)\n",
				status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx exception, ISR = 0x%x\n", status);
			atl1_intr_rx(adapter);
		}

	} while ((status = adapter->cmb.cmb->int_stats));

	/* re-enable Interrupt */
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}
/*
 * atl1_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1_set_multi(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u32 rctl;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
	if (netdev->flags & IFF_PROMISC)
		rctl |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= MAC_CTRL_MC_ALL_EN;
		rctl &= ~MAC_CTRL_PROMIS_EN;
	} else
		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);

	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);

	/* clear the old settings from the multicast hash table */
	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

	/* compute mc addresses' hash value, and put it into hash table */
	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
		atl1_hash_set(hw, hash_value);
	}
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;
	/* speed */
	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;
	/* rx checksum */
	if (adapter->rx_csum)
		value |= MAC_CTRL_RX_CHKSUM_EN;
	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {	/* link down */
		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
			dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return ATL1_SUCCESS;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			dev_info(&adapter->pdev->dev,
				"%s link is up %d Mbps %s\n",
				netdev->name, adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {	/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return ATL1_SUCCESS;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:	/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return ATL1_SUCCESS;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return ATL1_SUCCESS;
}
975 static void set_flow_ctrl_old(struct atl1_adapter
*adapter
)
979 /* RFD Flow Control */
980 value
= adapter
->rfd_ring
.count
;
986 value
= ((hi
& RXQ_RXF_PAUSE_TH_HI_MASK
) << RXQ_RXF_PAUSE_TH_HI_SHIFT
) |
987 ((lo
& RXQ_RXF_PAUSE_TH_LO_MASK
) << RXQ_RXF_PAUSE_TH_LO_SHIFT
);
988 iowrite32(value
, adapter
->hw
.hw_addr
+ REG_RXQ_RXF_PAUSE_THRESH
);
990 /* RRD Flow Control */
991 value
= adapter
->rrd_ring
.count
;
996 value
= ((hi
& RXQ_RRD_PAUSE_TH_HI_MASK
) << RXQ_RRD_PAUSE_TH_HI_SHIFT
) |
997 ((lo
& RXQ_RRD_PAUSE_TH_LO_MASK
) << RXQ_RRD_PAUSE_TH_LO_SHIFT
);
998 iowrite32(value
, adapter
->hw
.hw_addr
+ REG_RXQ_RRD_PAUSE_THRESH
);
1001 static void set_flow_ctrl_new(struct atl1_hw
*hw
)
1005 /* RXF Flow Control */
1006 value
= ioread32(hw
->hw_addr
+ REG_SRAM_RXF_LEN
);
1013 value
= ((hi
& RXQ_RXF_PAUSE_TH_HI_MASK
) << RXQ_RXF_PAUSE_TH_HI_SHIFT
) |
1014 ((lo
& RXQ_RXF_PAUSE_TH_LO_MASK
) << RXQ_RXF_PAUSE_TH_LO_SHIFT
);
1015 iowrite32(value
, hw
->hw_addr
+ REG_RXQ_RXF_PAUSE_THRESH
);
1017 /* RRD Flow Control */
1018 value
= ioread32(hw
->hw_addr
+ REG_SRAM_RRD_LEN
);
1025 value
= ((hi
& RXQ_RRD_PAUSE_TH_HI_MASK
) << RXQ_RRD_PAUSE_TH_HI_SHIFT
) |
1026 ((lo
& RXQ_RRD_PAUSE_TH_LO_MASK
) << RXQ_RRD_PAUSE_TH_LO_SHIFT
);
1027 iowrite32(value
, hw
->hw_addr
+ REG_RXQ_RRD_PAUSE_THRESH
);
1031 * atl1_configure - Configure Transmit&Receive Unit after Reset
1032 * @adapter: board private structure
1034 * Configure the Tx /Rx unit of the MAC after a reset.
1036 static u32
atl1_configure(struct atl1_adapter
*adapter
)
1038 struct atl1_hw
*hw
= &adapter
->hw
;
1041 /* clear interrupt status */
1042 iowrite32(0xffffffff, adapter
->hw
.hw_addr
+ REG_ISR
);
1044 /* set MAC Address */
1045 value
= (((u32
) hw
->mac_addr
[2]) << 24) |
1046 (((u32
) hw
->mac_addr
[3]) << 16) |
1047 (((u32
) hw
->mac_addr
[4]) << 8) |
1048 (((u32
) hw
->mac_addr
[5]));
1049 iowrite32(value
, hw
->hw_addr
+ REG_MAC_STA_ADDR
);
1050 value
= (((u32
) hw
->mac_addr
[0]) << 8) | (((u32
) hw
->mac_addr
[1]));
1051 iowrite32(value
, hw
->hw_addr
+ (REG_MAC_STA_ADDR
+ 4));
1055 /* HI base address */
1056 iowrite32((u32
) ((adapter
->tpd_ring
.dma
& 0xffffffff00000000ULL
) >> 32),
1057 hw
->hw_addr
+ REG_DESC_BASE_ADDR_HI
);
1058 /* LO base address */
1059 iowrite32((u32
) (adapter
->rfd_ring
.dma
& 0x00000000ffffffffULL
),
1060 hw
->hw_addr
+ REG_DESC_RFD_ADDR_LO
);
1061 iowrite32((u32
) (adapter
->rrd_ring
.dma
& 0x00000000ffffffffULL
),
1062 hw
->hw_addr
+ REG_DESC_RRD_ADDR_LO
);
1063 iowrite32((u32
) (adapter
->tpd_ring
.dma
& 0x00000000ffffffffULL
),
1064 hw
->hw_addr
+ REG_DESC_TPD_ADDR_LO
);
1065 iowrite32((u32
) (adapter
->cmb
.dma
& 0x00000000ffffffffULL
),
1066 hw
->hw_addr
+ REG_DESC_CMB_ADDR_LO
);
1067 iowrite32((u32
) (adapter
->smb
.dma
& 0x00000000ffffffffULL
),
1068 hw
->hw_addr
+ REG_DESC_SMB_ADDR_LO
);
1071 value
= adapter
->rrd_ring
.count
;
1073 value
+= adapter
->rfd_ring
.count
;
1074 iowrite32(value
, hw
->hw_addr
+ REG_DESC_RFD_RRD_RING_SIZE
);
1075 iowrite32(adapter
->tpd_ring
.count
, hw
->hw_addr
+
1076 REG_DESC_TPD_RING_SIZE
);
1079 iowrite32(1, hw
->hw_addr
+ REG_LOAD_PTR
);
1081 /* config Mailbox */
1082 value
= ((atomic_read(&adapter
->tpd_ring
.next_to_use
)
1083 & MB_TPD_PROD_INDX_MASK
) << MB_TPD_PROD_INDX_SHIFT
) |
1084 ((atomic_read(&adapter
->rrd_ring
.next_to_clean
)
1085 & MB_RRD_CONS_INDX_MASK
) << MB_RRD_CONS_INDX_SHIFT
) |
1086 ((atomic_read(&adapter
->rfd_ring
.next_to_use
)
1087 & MB_RFD_PROD_INDX_MASK
) << MB_RFD_PROD_INDX_SHIFT
);
1088 iowrite32(value
, hw
->hw_addr
+ REG_MAILBOX
);
1090 /* config IPG/IFG */
1091 value
= (((u32
) hw
->ipgt
& MAC_IPG_IFG_IPGT_MASK
)
1092 << MAC_IPG_IFG_IPGT_SHIFT
) |
1093 (((u32
) hw
->min_ifg
& MAC_IPG_IFG_MIFG_MASK
)
1094 << MAC_IPG_IFG_MIFG_SHIFT
) |
1095 (((u32
) hw
->ipgr1
& MAC_IPG_IFG_IPGR1_MASK
)
1096 << MAC_IPG_IFG_IPGR1_SHIFT
) |
1097 (((u32
) hw
->ipgr2
& MAC_IPG_IFG_IPGR2_MASK
)
1098 << MAC_IPG_IFG_IPGR2_SHIFT
);
1099 iowrite32(value
, hw
->hw_addr
+ REG_MAC_IPG_IFG
);
1101 /* config Half-Duplex Control */
1102 value
= ((u32
) hw
->lcol
& MAC_HALF_DUPLX_CTRL_LCOL_MASK
) |
1103 (((u32
) hw
->max_retry
& MAC_HALF_DUPLX_CTRL_RETRY_MASK
)
1104 << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT
) |
1105 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN
|
1106 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT
) |
1107 (((u32
) hw
->jam_ipg
& MAC_HALF_DUPLX_CTRL_JAMIPG_MASK
)
1108 << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT
);
1109 iowrite32(value
, hw
->hw_addr
+ REG_MAC_HALF_DUPLX_CTRL
);
1111 /* set Interrupt Moderator Timer */
1112 iowrite16(adapter
->imt
, hw
->hw_addr
+ REG_IRQ_MODU_TIMER_INIT
);
1113 iowrite32(MASTER_CTRL_ITIMER_EN
, hw
->hw_addr
+ REG_MASTER_CTRL
);
1115 /* set Interrupt Clear Timer */
1116 iowrite16(adapter
->ict
, hw
->hw_addr
+ REG_CMBDISDMA_TIMER
);
1118 /* set MTU, 4 : VLAN */
1119 iowrite32(hw
->max_frame_size
+ 4, hw
->hw_addr
+ REG_MTU
);
1121 /* jumbo size & rrd retirement timer */
1122 value
= (((u32
) hw
->rx_jumbo_th
& RXQ_JMBOSZ_TH_MASK
)
1123 << RXQ_JMBOSZ_TH_SHIFT
) |
1124 (((u32
) hw
->rx_jumbo_lkah
& RXQ_JMBO_LKAH_MASK
)
1125 << RXQ_JMBO_LKAH_SHIFT
) |
1126 (((u32
) hw
->rrd_ret_timer
& RXQ_RRD_TIMER_MASK
)
1127 << RXQ_RRD_TIMER_SHIFT
);
1128 iowrite32(value
, hw
->hw_addr
+ REG_RXQ_JMBOSZ_RRDTIM
);
1131 switch (hw
->dev_rev
) {
1136 set_flow_ctrl_old(adapter
);
1139 set_flow_ctrl_new(hw
);
1144 value
= (((u32
) hw
->tpd_burst
& TXQ_CTRL_TPD_BURST_NUM_MASK
)
1145 << TXQ_CTRL_TPD_BURST_NUM_SHIFT
) |
1146 (((u32
) hw
->txf_burst
& TXQ_CTRL_TXF_BURST_NUM_MASK
)
1147 << TXQ_CTRL_TXF_BURST_NUM_SHIFT
) |
1148 (((u32
) hw
->tpd_fetch_th
& TXQ_CTRL_TPD_FETCH_TH_MASK
)
1149 << TXQ_CTRL_TPD_FETCH_TH_SHIFT
) | TXQ_CTRL_ENH_MODE
|
1151 iowrite32(value
, hw
->hw_addr
+ REG_TXQ_CTRL
);
1153 /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
1154 value
= (((u32
) hw
->tx_jumbo_task_th
& TX_JUMBO_TASK_TH_MASK
)
1155 << TX_JUMBO_TASK_TH_SHIFT
) |
1156 (((u32
) hw
->tpd_fetch_gap
& TX_TPD_MIN_IPG_MASK
)
1157 << TX_TPD_MIN_IPG_SHIFT
);
1158 iowrite32(value
, hw
->hw_addr
+ REG_TX_JUMBO_TASK_TH_TPD_IPG
);
1161 value
= (((u32
) hw
->rfd_burst
& RXQ_CTRL_RFD_BURST_NUM_MASK
)
1162 << RXQ_CTRL_RFD_BURST_NUM_SHIFT
) |
1163 (((u32
) hw
->rrd_burst
& RXQ_CTRL_RRD_BURST_THRESH_MASK
)
1164 << RXQ_CTRL_RRD_BURST_THRESH_SHIFT
) |
1165 (((u32
) hw
->rfd_fetch_gap
& RXQ_CTRL_RFD_PREF_MIN_IPG_MASK
)
1166 << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT
) | RXQ_CTRL_CUT_THRU_EN
|
1168 iowrite32(value
, hw
->hw_addr
+ REG_RXQ_CTRL
);
1170 /* config DMA Engine */
1171 value
= ((((u32
) hw
->dmar_block
) & DMA_CTRL_DMAR_BURST_LEN_MASK
)
1172 << DMA_CTRL_DMAR_BURST_LEN_SHIFT
) |
1173 ((((u32
) hw
->dmaw_block
) & DMA_CTRL_DMAR_BURST_LEN_MASK
)
1174 << DMA_CTRL_DMAR_BURST_LEN_SHIFT
) | DMA_CTRL_DMAR_EN
|
1176 value
|= (u32
) hw
->dma_ord
;
1177 if (atl1_rcb_128
== hw
->rcb_value
)
1178 value
|= DMA_CTRL_RCB_VALUE
;
1179 iowrite32(value
, hw
->hw_addr
+ REG_DMA_CTRL
);
1181 /* config CMB / SMB */
1182 value
= hw
->cmb_rrd
| ((u32
) hw
->cmb_tpd
<< 16);
1183 iowrite32(value
, hw
->hw_addr
+ REG_CMB_WRITE_TH
);
1184 value
= hw
->cmb_rx_timer
| ((u32
) hw
->cmb_tx_timer
<< 16);
1185 iowrite32(value
, hw
->hw_addr
+ REG_CMB_WRITE_TIMER
);
1186 iowrite32(hw
->smb_timer
, hw
->hw_addr
+ REG_SMB_TIMER
);
1188 /* --- enable CMB / SMB */
1189 value
= CSMB_CTRL_CMB_EN
| CSMB_CTRL_SMB_EN
;
1190 iowrite32(value
, hw
->hw_addr
+ REG_CSMB_CTRL
);
1192 value
= ioread32(adapter
->hw
.hw_addr
+ REG_ISR
);
1193 if (unlikely((value
& ISR_PHY_LINKDOWN
) != 0))
1194 value
= 1; /* config failed */
1198 /* clear all interrupt status */
1199 iowrite32(0x3fffffff, adapter
->hw
.hw_addr
+ REG_ISR
);
1200 iowrite32(0, adapter
->hw
.hw_addr
+ REG_ISR
);
/*
 * atl1_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void atl1_irq_disable(struct atl1_adapter *adapter)
{
	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
	ioread32(adapter->hw.hw_addr + REG_IMR);
	synchronize_irq(adapter->pdev->irq);
}
static void atl1_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *grp)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&adapter->lock, flags);
	/* atl1_irq_disable(adapter); */
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
		ctrl |= MAC_CTRL_RMV_VLAN;
		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
		ctrl &= ~MAC_CTRL_RMV_VLAN;
		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
	}

	/* atl1_irq_enable(adapter); */
	spin_unlock_irqrestore(&adapter->lock, flags);
}
static void atl1_restore_vlan(struct atl1_adapter *adapter)
{
	atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
	return ((next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1);
}
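/*
 * tpd_avail() example: with count = 256, next_to_clean = 10 and
 * next_to_use = 250 it returns 256 + 10 - 250 - 1 = 15.  The "- 1" keeps one
 * descriptor permanently unused so a completely full ring is never confused
 * with an empty one.
 */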
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tso_param *tso)
{
	/* We enter this function holding a spinlock. */
	u8 ipofst;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return err;
		}

		if (skb->protocol == ntohs(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
				iph->daddr, 0, IPPROTO_TCP, 0);
			ipofst = skb_network_offset(skb);
			if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;

			tso->tsopl |= (iph->ihl &
				CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
			tso->tsopl |= (tcp_hdrlen(skb) &
				TSO_PARAM_TCPHDRLEN_MASK) <<
				TSO_PARAM_TCPHDRLEN_SHIFT;
			tso->tsopl |= (skb_shinfo(skb)->gso_size &
				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
			return true;
		}
	}

	return false;
}
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct csum_param *csum)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cso = skb_transport_offset(skb);
		css = cso + skb->csum_offset;
		if (unlikely(cso & 0x1)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"payload offset not an even number\n");
			return -1;
		}
		csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
			CSUM_PARAM_PLOADOFFSET_SHIFT;
		csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
			CSUM_PARAM_XSUMOFFSET_SHIFT;
		csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
		return true;
	}

	return false;
}
1321 static void atl1_tx_map(struct atl1_adapter
*adapter
, struct sk_buff
*skb
,
1324 /* We enter this function holding a spinlock. */
1325 struct atl1_tpd_ring
*tpd_ring
= &adapter
->tpd_ring
;
1326 struct atl1_buffer
*buffer_info
;
1328 int first_buf_len
= skb
->len
;
1329 unsigned long offset
;
1330 unsigned int nr_frags
;
1332 u16 tpd_next_to_use
;
1336 first_buf_len
-= skb
->data_len
;
1337 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1338 tpd_next_to_use
= atomic_read(&tpd_ring
->next_to_use
);
1339 buffer_info
= &tpd_ring
->buffer_info
[tpd_next_to_use
];
1340 if (unlikely(buffer_info
->skb
))
1342 buffer_info
->skb
= NULL
; /* put skb in last TPD */
1346 proto_hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
1347 buffer_info
->length
= proto_hdr_len
;
1348 page
= virt_to_page(skb
->data
);
1349 offset
= (unsigned long)skb
->data
& ~PAGE_MASK
;
1350 buffer_info
->dma
= pci_map_page(adapter
->pdev
, page
,
1351 offset
, proto_hdr_len
,
1354 if (++tpd_next_to_use
== tpd_ring
->count
)
1355 tpd_next_to_use
= 0;
1357 if (first_buf_len
> proto_hdr_len
) {
1358 len12
= first_buf_len
- proto_hdr_len
;
1359 m
= (len12
+ ATL1_MAX_TX_BUF_LEN
- 1) /
1360 ATL1_MAX_TX_BUF_LEN
;
1361 for (i
= 0; i
< m
; i
++) {
1363 &tpd_ring
->buffer_info
[tpd_next_to_use
];
1364 buffer_info
->skb
= NULL
;
1365 buffer_info
->length
=
1366 (ATL1_MAX_TX_BUF_LEN
>=
1367 len12
) ? ATL1_MAX_TX_BUF_LEN
: len12
;
1368 len12
-= buffer_info
->length
;
1369 page
= virt_to_page(skb
->data
+
1371 i
* ATL1_MAX_TX_BUF_LEN
));
1372 offset
= (unsigned long)(skb
->data
+
1374 i
* ATL1_MAX_TX_BUF_LEN
)) & ~PAGE_MASK
;
1375 buffer_info
->dma
= pci_map_page(adapter
->pdev
,
1376 page
, offset
, buffer_info
->length
,
1378 if (++tpd_next_to_use
== tpd_ring
->count
)
1379 tpd_next_to_use
= 0;
1384 buffer_info
->length
= first_buf_len
;
1385 page
= virt_to_page(skb
->data
);
1386 offset
= (unsigned long)skb
->data
& ~PAGE_MASK
;
1387 buffer_info
->dma
= pci_map_page(adapter
->pdev
, page
,
1388 offset
, first_buf_len
, PCI_DMA_TODEVICE
);
1389 if (++tpd_next_to_use
== tpd_ring
->count
)
1390 tpd_next_to_use
= 0;
1393 for (f
= 0; f
< nr_frags
; f
++) {
1394 struct skb_frag_struct
*frag
;
1397 frag
= &skb_shinfo(skb
)->frags
[f
];
1400 m
= (lenf
+ ATL1_MAX_TX_BUF_LEN
- 1) / ATL1_MAX_TX_BUF_LEN
;
1401 for (i
= 0; i
< m
; i
++) {
1402 buffer_info
= &tpd_ring
->buffer_info
[tpd_next_to_use
];
1403 if (unlikely(buffer_info
->skb
))
1405 buffer_info
->skb
= NULL
;
1406 buffer_info
->length
= (lenf
> ATL1_MAX_TX_BUF_LEN
) ?
1407 ATL1_MAX_TX_BUF_LEN
: lenf
;
1408 lenf
-= buffer_info
->length
;
1409 buffer_info
->dma
= pci_map_page(adapter
->pdev
,
1411 frag
->page_offset
+ (i
* ATL1_MAX_TX_BUF_LEN
),
1412 buffer_info
->length
, PCI_DMA_TODEVICE
);
1414 if (++tpd_next_to_use
== tpd_ring
->count
)
1415 tpd_next_to_use
= 0;
1419 /* last tpd's buffer-info */
1420 buffer_info
->skb
= skb
;
static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
	union tpd_descr *descr)
{
	/* We enter this function holding a spinlock. */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int j;
	u32 val;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
		tpd->desc.csum.csumpu = descr->csum.csumpu;
		tpd->desc.csum.csumpl = descr->csum.csumpl;
		tpd->desc.tso.tsopu = descr->tso.tsopu;
		tpd->desc.tso.tsopl = descr->tso.tsopl;
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->desc.data = descr->data;
		tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
			CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;

		val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
			TSO_PARAM_SEGMENT_MASK;
		/* for TSO, mark the first descriptor as carrying the headers */
		if (val && !j)
			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;

		if (j == (count - 1))
			tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;

		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}
1493 static int atl1_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
1495 struct atl1_adapter
*adapter
= netdev_priv(netdev
);
1501 union tpd_descr param
;
1504 unsigned long flags
;
1505 unsigned int nr_frags
= 0;
1506 unsigned int mss
= 0;
1508 unsigned int proto_hdr_len
;
1510 len
-= skb
->data_len
;
1512 if (unlikely(skb
->len
== 0)) {
1513 dev_kfree_skb_any(skb
);
1514 return NETDEV_TX_OK
;
1518 param
.tso
.tsopu
= 0;
1519 param
.tso
.tsopl
= 0;
1520 param
.csum
.csumpu
= 0;
1521 param
.csum
.csumpl
= 0;
1523 /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1524 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1525 for (f
= 0; f
< nr_frags
; f
++) {
1526 frag_size
= skb_shinfo(skb
)->frags
[f
].size
;
1528 count
+= (frag_size
+ ATL1_MAX_TX_BUF_LEN
- 1) /
1529 ATL1_MAX_TX_BUF_LEN
;
1532 /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1533 mss
= skb_shinfo(skb
)->gso_size
;
1535 if (skb
->protocol
== htons(ETH_P_IP
)) {
1536 proto_hdr_len
= (skb_transport_offset(skb
) +
1538 if (unlikely(proto_hdr_len
> len
)) {
1539 dev_kfree_skb_any(skb
);
1540 return NETDEV_TX_OK
;
1542 /* need additional TPD ? */
1543 if (proto_hdr_len
!= len
)
1544 count
+= (len
- proto_hdr_len
+
1545 ATL1_MAX_TX_BUF_LEN
- 1) /
1546 ATL1_MAX_TX_BUF_LEN
;
1550 local_irq_save(flags
);
1551 if (!spin_trylock(&adapter
->lock
)) {
1552 /* Can't get lock - tell upper layer to requeue */
1553 local_irq_restore(flags
);
1554 dev_printk(KERN_DEBUG
, &adapter
->pdev
->dev
, "tx locked\n");
1555 return NETDEV_TX_LOCKED
;
1558 if (tpd_avail(&adapter
->tpd_ring
) < count
) {
1559 /* not enough descriptors */
1560 netif_stop_queue(netdev
);
1561 spin_unlock_irqrestore(&adapter
->lock
, flags
);
1562 dev_printk(KERN_DEBUG
, &adapter
->pdev
->dev
, "tx busy\n");
1563 return NETDEV_TX_BUSY
;
1568 if (adapter
->vlgrp
&& vlan_tx_tag_present(skb
)) {
1569 vlan_tag
= vlan_tx_tag_get(skb
);
1570 vlan_tag
= (vlan_tag
<< 4) | (vlan_tag
>> 13) |
1571 ((vlan_tag
>> 9) & 0x8);
1572 param
.csum
.csumpl
|= 1 << CSUM_PARAM_INSVLAG_SHIFT
;
1573 param
.csum
.csumpu
|= (vlan_tag
& CSUM_PARAM_VALANTAG_MASK
) <<
1574 CSUM_PARAM_VALAN_SHIFT
;
1577 tso
= atl1_tso(adapter
, skb
, ¶m
.tso
);
1579 spin_unlock_irqrestore(&adapter
->lock
, flags
);
1580 dev_kfree_skb_any(skb
);
1581 return NETDEV_TX_OK
;
1585 ret_val
= atl1_tx_csum(adapter
, skb
, ¶m
.csum
);
1587 spin_unlock_irqrestore(&adapter
->lock
, flags
);
1588 dev_kfree_skb_any(skb
);
1589 return NETDEV_TX_OK
;
1593 val
= (param
.csum
.csumpl
>> CSUM_PARAM_SEGMENT_SHIFT
) &
1594 CSUM_PARAM_SEGMENT_MASK
;
1595 atl1_tx_map(adapter
, skb
, 1 == val
);
1596 atl1_tx_queue(adapter
, count
, ¶m
);
1597 netdev
->trans_start
= jiffies
;
1598 spin_unlock_irqrestore(&adapter
->lock
, flags
);
1599 atl1_update_mailbox(adapter
);
1600 return NETDEV_TX_OK
;
/*
 * atl1_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	return &adapter->net_stats;
}
/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}
	}

	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}
/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;

	rrd_ring->desc = NULL;
}
s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = IRQF_SAMPLE_RANDOM;

	/* hardware has been reset, we need to reload some things */
	atl1_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atl1_restore_vlan(adapter);
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))		/* no RX BUFFER allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		dev_info(&adapter->pdev->dev,
			"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
			netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	mod_timer(&adapter->watchdog_timer, jiffies);
	atl1_irq_enable(adapter);
	atl1_check_link(adapter);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}
void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atl1_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}
/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}
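/*
 * Example: for the default MTU of 1500 and the usual ENET_HEADER_SIZE (14)
 * and ETHERNET_FCS_SIZE (4), max_frame is 1500 + 14 + 4 = 1518, and
 * rx_buffer_len is then rounded up to the next multiple of 8, i.e. 1520.
 */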
/*
 * atl1_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_set_mac(struct net_device *netdev, void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl1_set_mac_addr(&adapter->hw);
	return 0;
}
/*
 * atl1_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_watchdog(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

	return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}
static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl1_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
/*
 * atl1_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl1_tx_timeout(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}
/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}
int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;

	ret = atl1_reset_hw(&adapter->hw);
	if (ret != ATL1_SUCCESS)
		return ret;
	return atl1_init_hw(&adapter->hw);
}
/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}
/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);

        atl1_down(adapter);
        atl1_free_ring_resources(adapter);
        return 0;
}
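
/*
 * With CONFIG_NET_POLL_CONTROLLER, netpoll users such as netconsole call
 * the routine below to service the NIC when normal interrupt delivery
 * cannot be relied upon; it simply runs the interrupt handler with the
 * device's IRQ line disabled.
 */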
#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
        disable_irq(netdev->irq);
        atl1_intr(netdev->irq, netdev);
        enable_irq(netdev->irq);
}
#endif
/*
 * Orphaned vendor comment left intact here:
 *
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 */
static void atl1_tx_timeout_task(struct work_struct *work)
{
        struct atl1_adapter *adapter =
                container_of(work, struct atl1_adapter, tx_timeout_task);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);
        atl1_down(adapter);
        atl1_up(adapter);
        netif_device_attach(netdev);
}
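
/*
 * atl1_tx_timeout_task() above performs a full down/up cycle of the
 * interface.  The same handler is also registered for
 * adapter->pcie_dma_to_rst_task in atl1_probe() below, so the PCIE
 * DMAR_TO_INT condition described in the vendor comment is recovered
 * through the same reset path.
 */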
/*
 * atl1_link_chg_task - handle a link change event outside of interrupt context
 */
static void atl1_link_chg_task(struct work_struct *work)
{
        struct atl1_adapter *adapter =
                container_of(work, struct atl1_adapter, link_chg_task);
        unsigned long flags;

        spin_lock_irqsave(&adapter->lock, flags);
        atl1_check_link(adapter);
        spin_unlock_irqrestore(&adapter->lock, flags);
}
/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
        u32 value;

        /*
         * much vendor magic here -- the constant written to register 0x12FC
         * is vendor-specified and is elided in this listing
         */
        iowrite32(value, adapter->hw.hw_addr + 0x12FC);

        /* pcie flow control mode change */
        value = ioread32(adapter->hw.hw_addr + 0x1008);
        iowrite32(value, adapter->hw.hw_addr + 0x1008);
}
/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable bit
 * (0x400) in the PCI Command register is left set, so the device cannot
 * raise legacy interrupts.  Clear that bit here.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
        unsigned long value;

        value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
        if (value & PCI_COMMAND_INTX_DISABLE)
                value &= ~PCI_COMMAND_INTX_DISABLE;
        iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
}
/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * It performs the OS initialization, configures the adapter private
 * structure, and resets the hardware.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
        const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct atl1_adapter *adapter;
        static int cards_found = 0;
        bool pci_using_64 = true;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /* prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
        err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto err_dma;
                }
                pci_using_64 = false;
        }
        /*
         * Mark all PCI regions associated with PCI device
         * pdev as being reserved by owner atl1_driver_name
         */
        err = pci_request_regions(pdev, atl1_driver_name);
        if (err)
                goto err_request_regions;

        /*
         * Enables bus-mastering on the device and calls
         * pcibios_set_master to do the needed arch specific settings
         */
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct atl1_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }
        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;

        adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_pci_iomap;
        }
        /* get device revision number */
        adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
                (REG_MASTER_CTRL + 2));
        dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
        /* set default ring resource counts */
        adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
        adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

        adapter->mii.dev = netdev;
        adapter->mii.mdio_read = mdio_read;
        adapter->mii.mdio_write = mdio_write;
        adapter->mii.phy_id_mask = 0x1f;
        adapter->mii.reg_num_mask = 0x1f;
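
        /*
         * The mii_if_info fields above give the generic MII layer what it
         * needs for generic_mii_ioctl(): the netdev, the two MDIO accessors
         * defined earlier in this file, and 5-bit masks for the PHY address
         * and register number.
         */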
        netdev->open = &atl1_open;
        netdev->stop = &atl1_close;
        netdev->hard_start_xmit = &atl1_xmit_frame;
        netdev->get_stats = &atl1_get_stats;
        netdev->set_multicast_list = &atl1_set_multi;
        netdev->set_mac_address = &atl1_set_mac;
        netdev->change_mtu = &atl1_change_mtu;
        netdev->do_ioctl = &atl1_ioctl;
        netdev->tx_timeout = &atl1_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = atl1_poll_controller;
#endif
        netdev->vlan_rx_register = atl1_vlan_rx_register;

        netdev->ethtool_ops = &atl1_ethtool_ops;
        adapter->bd_number = cards_found;
        adapter->pci_using_64 = pci_using_64;
        /* setup the private structure */
        err = atl1_sw_init(adapter);
        if (err)
                goto err_common;

        netdev->features = NETIF_F_HW_CSUM;
        netdev->features |= NETIF_F_SG;
        netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

        /*
         * FIXME - Until tso performance gets fixed, disable the feature.
         * Enable it with ethtool -K if desired.
         */
        /* netdev->features |= NETIF_F_TSO; */

        if (pci_using_64)
                netdev->features |= NETIF_F_HIGHDMA;

        /* LLTX: the driver does its own transmit locking */
        netdev->features |= NETIF_F_LLTX;

        /*
         * patch for some L1 of old version,
         * the final version of L1 may not need these patches
         */
        /* atl1_pcie_patch(adapter); */
        /* really reset GPHY core */
        iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);

        /*
         * reset the controller to
         * put the device in a known good starting state
         */
        if (atl1_reset_hw(&adapter->hw)) {
                err = -EIO;
                goto err_common;
        }

        /* copy the MAC address out of the EEPROM */
        atl1_read_mac_addr(&adapter->hw);
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EIO;
                goto err_common;
        }

        atl1_check_options(adapter);
        /* pre-init the MAC, and setup link */
        err = atl1_init_hw(&adapter->hw);
        if (err) {
                err = -EIO;
                goto err_common;
        }

        atl1_pcie_patch(adapter);
        /* assume we have no link for now */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &atl1_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        init_timer(&adapter->phy_config_timer);
        adapter->phy_config_timer.function = &atl1_phy_config;
        adapter->phy_config_timer.data = (unsigned long)adapter;
        adapter->phy_timer_pending = false;

        INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

        INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);

        INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
        err = register_netdev(netdev);
        if (err)
                goto err_common;

        cards_found++;
        atl1_via_workaround(adapter);
        return 0;

err_common:
        pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_dma:
err_request_regions:
        pci_disable_device(pdev);
        return err;
}
/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter;

        /* Device not available. Return. */
        if (!netdev)
                return;

        adapter = netdev_priv(netdev);

        /* Some atl1 boards lack persistent storage for their MAC, and get it
         * from the BIOS during POST.  If we've been messing with the MAC
         * address, we need to save the permanent one.
         */
        if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
                memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
                        ETH_ALEN);
                atl1_set_mac_addr(&adapter->hw);
        }

        iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
        unregister_netdev(netdev);
        pci_iounmap(pdev, adapter->hw.hw_addr);
        pci_release_regions(pdev);
        free_netdev(netdev);
        pci_disable_device(pdev);
}
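
/*
 * Suspend/resume support.  atl1_suspend() below reads MII_BMSR twice to
 * obtain the current (unlatched) link state, drops the link-change wake
 * request if the link is already up, programs REG_WOL_CTRL with the
 * requested magic-packet and link-change wake bits, adjusts the MAC
 * receive filters for multicast/broadcast wake, and finally arms PME for
 * the D3hot/D3cold states before putting the device to sleep.
 */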
#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        u32 ctrl = 0;
        u32 wufc = adapter->wol;

        netif_device_detach(netdev);
        if (netif_running(netdev))
                atl1_down(adapter);

        /* read BMSR twice; the link status bits are latched */
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
        if (ctrl & BMSR_LSTATUS)
                wufc &= ~ATL1_WUFC_LNKC;

        /* reduce speed to 10/100M */
        if (wufc) {
                atl1_phy_enter_power_saving(hw);
                /* force the driver to re-setup the link on resume */
                hw->phy_configured = false;
                atl1_set_mac_addr(hw);
                atl1_set_multi(netdev);
                ctrl = 0;
                /* turn on magic packet wol */
                if (wufc & ATL1_WUFC_MAG)
                        ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

                /* turn on link change wol */
                if (wufc & ATL1_WUFC_LNKC)
                        ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
                iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);

                /* turn on all-multi mode if wake on multicast is enabled */
                ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
                ctrl &= ~MAC_CTRL_DBG;
                ctrl &= ~MAC_CTRL_PROMIS_EN;
                if (wufc & ATL1_WUFC_MC)
                        ctrl |= MAC_CTRL_MC_ALL_EN;
                else
                        ctrl &= ~MAC_CTRL_MC_ALL_EN;

                /* turn on broadcast mode if wake on broadcast is enabled */
                if (wufc & ATL1_WUFC_BC)
                        ctrl |= MAC_CTRL_BC_EN;
                else
                        ctrl &= ~MAC_CTRL_BC_EN;

                /* enable RX so wake packets can be received */
                ctrl |= MAC_CTRL_RX_EN;
                iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
        } else {
                iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);

        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}
static int atl1_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        u32 ret_val;

        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        ret_val = pci_enable_device(pdev);
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
        atl1_reset(adapter);

        if (netif_running(netdev))
                atl1_up(adapter);
        netif_device_attach(netdev);

        atl1_via_workaround(adapter);

        return 0;
}
#else
#define atl1_suspend NULL
#define atl1_resume NULL
#endif
static struct pci_driver atl1_driver = {
        .name = atl1_driver_name,
        .id_table = atl1_pci_tbl,
        .probe = atl1_probe,
        .remove = __devexit_p(atl1_remove),
        .suspend = atl1_suspend,
        .resume = atl1_resume
};
/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
        pci_unregister_driver(&atl1_driver);
}
/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
        return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);