/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
36 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME "lan78xx"
39 #define DRIVER_VERSION "1.0.1"
41 #define TX_TIMEOUT_JIFFIES (5 * HZ)
42 #define THROTTLE_JIFFIES (HZ / 8)
43 #define UNLINK_TIMEOUT_MS 3
45 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
47 #define SS_USB_PKT_SIZE (1024)
48 #define HS_USB_PKT_SIZE (512)
49 #define FS_USB_PKT_SIZE (64)
51 #define MAX_RX_FIFO_SIZE (12 * 1024)
52 #define MAX_TX_FIFO_SIZE (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE (9000)
56 #define DEFAULT_TX_CSUM_ENABLE (true)
57 #define DEFAULT_RX_CSUM_ENABLE (true)
58 #define DEFAULT_TSO_CSUM_ENABLE (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE (true)
60 #define TX_OVERHEAD (8)
63 #define LAN78XX_USB_VENDOR_ID (0x0424)
64 #define LAN7800_USB_PRODUCT_ID (0x7800)
65 #define LAN7850_USB_PRODUCT_ID (0x7850)
66 #define LAN78XX_EEPROM_MAGIC (0x78A5)
67 #define LAN78XX_OTP_MAGIC (0x78F3)
72 #define EEPROM_INDICATOR (0xA5)
73 #define EEPROM_MAC_OFFSET (0x01)
74 #define MAX_EEPROM_SIZE 512
75 #define OTP_INDICATOR_1 (0xF3)
76 #define OTP_INDICATOR_2 (0xF7)
78 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
82 /* USB related defines */
83 #define BULK_IN_PIPE 1
84 #define BULK_OUT_PIPE 2
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
89 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
91 "RX Alignment Errors",
94 "RX Undersize Frame Errors",
95 "RX Oversize Frame Errors",
97 "RX Unicast Byte Count",
98 "RX Broadcast Byte Count",
99 "RX Multicast Byte Count",
101 "RX Broadcast Frames",
102 "RX Multicast Frames",
105 "RX 65 - 127 Byte Frames",
106 "RX 128 - 255 Byte Frames",
107 "RX 256 - 511 Bytes Frames",
108 "RX 512 - 1023 Byte Frames",
109 "RX 1024 - 1518 Byte Frames",
110 "RX Greater 1518 Byte Frames",
111 "EEE RX LPI Transitions",
114 "TX Excess Deferral Errors",
117 "TX Single Collisions",
118 "TX Multiple Collisions",
119 "TX Excessive Collision",
120 "TX Late Collisions",
121 "TX Unicast Byte Count",
122 "TX Broadcast Byte Count",
123 "TX Multicast Byte Count",
125 "TX Broadcast Frames",
126 "TX Multicast Frames",
129 "TX 65 - 127 Byte Frames",
130 "TX 128 - 255 Byte Frames",
131 "TX 256 - 511 Bytes Frames",
132 "TX 512 - 1023 Byte Frames",
133 "TX 1024 - 1518 Byte Frames",
134 "TX Greater 1518 Byte Frames",
135 "EEE TX LPI Transitions",
139 struct lan78xx_statstage
{
141 u32 rx_alignment_errors
;
142 u32 rx_fragment_errors
;
143 u32 rx_jabber_errors
;
144 u32 rx_undersize_frame_errors
;
145 u32 rx_oversize_frame_errors
;
146 u32 rx_dropped_frames
;
147 u32 rx_unicast_byte_count
;
148 u32 rx_broadcast_byte_count
;
149 u32 rx_multicast_byte_count
;
150 u32 rx_unicast_frames
;
151 u32 rx_broadcast_frames
;
152 u32 rx_multicast_frames
;
154 u32 rx_64_byte_frames
;
155 u32 rx_65_127_byte_frames
;
156 u32 rx_128_255_byte_frames
;
157 u32 rx_256_511_bytes_frames
;
158 u32 rx_512_1023_byte_frames
;
159 u32 rx_1024_1518_byte_frames
;
160 u32 rx_greater_1518_byte_frames
;
161 u32 eee_rx_lpi_transitions
;
164 u32 tx_excess_deferral_errors
;
165 u32 tx_carrier_errors
;
166 u32 tx_bad_byte_count
;
167 u32 tx_single_collisions
;
168 u32 tx_multiple_collisions
;
169 u32 tx_excessive_collision
;
170 u32 tx_late_collisions
;
171 u32 tx_unicast_byte_count
;
172 u32 tx_broadcast_byte_count
;
173 u32 tx_multicast_byte_count
;
174 u32 tx_unicast_frames
;
175 u32 tx_broadcast_frames
;
176 u32 tx_multicast_frames
;
178 u32 tx_64_byte_frames
;
179 u32 tx_65_127_byte_frames
;
180 u32 tx_128_255_byte_frames
;
181 u32 tx_256_511_bytes_frames
;
182 u32 tx_512_1023_byte_frames
;
183 u32 tx_1024_1518_byte_frames
;
184 u32 tx_greater_1518_byte_frames
;
185 u32 eee_tx_lpi_transitions
;
191 struct lan78xx_priv
{
192 struct lan78xx_net
*dev
;
194 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicat hash table */
195 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
196 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
197 struct mutex dataport_mutex
; /* for dataport access */
198 spinlock_t rfe_ctl_lock
; /* for rfe register access */
199 struct work_struct set_multicast
;
200 struct work_struct set_vlan
;
214 struct skb_data
{ /* skb->cb is one of these */
216 struct lan78xx_net
*dev
;
217 enum skb_state state
;
222 struct usb_ctrlrequest req
;
223 struct lan78xx_net
*dev
;
226 #define EVENT_TX_HALT 0
227 #define EVENT_RX_HALT 1
228 #define EVENT_RX_MEMORY 2
229 #define EVENT_STS_SPLIT 3
230 #define EVENT_LINK_RESET 4
231 #define EVENT_RX_PAUSED 5
232 #define EVENT_DEV_WAKING 6
233 #define EVENT_DEV_ASLEEP 7
234 #define EVENT_DEV_OPEN 8
237 struct net_device
*net
;
238 struct usb_device
*udev
;
239 struct usb_interface
*intf
;
244 struct sk_buff_head rxq
;
245 struct sk_buff_head txq
;
246 struct sk_buff_head done
;
247 struct sk_buff_head rxq_pause
;
248 struct sk_buff_head txq_pend
;
250 struct tasklet_struct bh
;
251 struct delayed_work wq
;
253 struct usb_host_endpoint
*ep_blkin
;
254 struct usb_host_endpoint
*ep_blkout
;
255 struct usb_host_endpoint
*ep_intr
;
259 struct urb
*urb_intr
;
260 struct usb_anchor deferred
;
262 struct mutex phy_mutex
; /* for phy access */
263 unsigned pipe_in
, pipe_out
, pipe_intr
;
265 u32 hard_mtu
; /* count any extra framing */
266 size_t rx_urb_size
; /* size for rx urbs */
270 wait_queue_head_t
*wait
;
271 unsigned char suspend_count
;
274 struct timer_list delay
;
276 unsigned long data
[5];
282 struct mii_bus
*mdiobus
;
285 /* use ethtool to change the level for any given device */
286 static int msg_level
= -1;
287 module_param(msg_level
, int, 0);
288 MODULE_PARM_DESC(msg_level
, "Override default message level");
290 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
292 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
298 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
299 USB_VENDOR_REQUEST_READ_REGISTER
,
300 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
301 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
302 if (likely(ret
>= 0)) {
306 netdev_warn(dev
->net
,
307 "Failed to read register index 0x%08x. ret = %d",
316 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
318 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
327 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
328 USB_VENDOR_REQUEST_WRITE_REGISTER
,
329 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
330 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
331 if (unlikely(ret
< 0)) {
332 netdev_warn(dev
->net
,
333 "Failed to write register index 0x%08x. ret = %d",
342 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
343 struct lan78xx_statstage
*data
)
347 struct lan78xx_statstage
*stats
;
351 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
355 ret
= usb_control_msg(dev
->udev
,
356 usb_rcvctrlpipe(dev
->udev
, 0),
357 USB_VENDOR_REQUEST_GET_STATS
,
358 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
363 USB_CTRL_SET_TIMEOUT
);
364 if (likely(ret
>= 0)) {
367 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
368 le32_to_cpus(&src
[i
]);
372 netdev_warn(dev
->net
,
373 "Failed to read stat ret = 0x%x", ret
);
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
384 unsigned long start_time
= jiffies
;
389 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
390 if (unlikely(ret
< 0))
393 if (!(val
& MII_ACC_MII_BUSY_
))
395 } while (!time_after(jiffies
, start_time
+ HZ
));
400 static inline u32
mii_access(int id
, int index
, int read
)
404 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
405 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
407 ret
|= MII_ACC_MII_READ_
;
409 ret
|= MII_ACC_MII_WRITE_
;
410 ret
|= MII_ACC_MII_BUSY_
;
415 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
417 unsigned long start_time
= jiffies
;
422 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
423 if (unlikely(ret
< 0))
426 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
427 (val
& E2P_CMD_EPC_TIMEOUT_
))
429 usleep_range(40, 100);
430 } while (!time_after(jiffies
, start_time
+ HZ
));
432 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
433 netdev_warn(dev
->net
, "EEPROM read operation timeout");
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
442 unsigned long start_time
= jiffies
;
447 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
448 if (unlikely(ret
< 0))
451 if (!(val
& E2P_CMD_EPC_BUSY_
))
454 usleep_range(40, 100);
455 } while (!time_after(jiffies
, start_time
+ HZ
));
457 netdev_warn(dev
->net
, "EEPROM is busy");
461 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
462 u32 length
, u8
*data
)
467 ret
= lan78xx_eeprom_confirm_not_busy(dev
);
471 for (i
= 0; i
< length
; i
++) {
472 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
473 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
474 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
475 if (unlikely(ret
< 0))
478 ret
= lan78xx_wait_eeprom(dev
);
482 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
483 if (unlikely(ret
< 0))
486 data
[i
] = val
& 0xFF;
493 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
494 u32 length
, u8
*data
)
499 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
500 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
501 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
508 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
509 u32 length
, u8
*data
)
514 ret
= lan78xx_eeprom_confirm_not_busy(dev
);
518 /* Issue write/erase enable command */
519 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
520 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
521 if (unlikely(ret
< 0))
524 ret
= lan78xx_wait_eeprom(dev
);
528 for (i
= 0; i
< length
; i
++) {
529 /* Fill data register */
531 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
535 /* Send "write" command */
536 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
537 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
538 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
542 ret
= lan78xx_wait_eeprom(dev
);
552 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
553 u32 length
, u8
*data
)
558 unsigned long timeout
;
560 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
562 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
563 /* clear it and wait to be cleared */
564 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
566 timeout
= jiffies
+ HZ
;
569 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
570 if (time_after(jiffies
, timeout
)) {
571 netdev_warn(dev
->net
,
572 "timeout on OTP_PWR_DN");
575 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
578 for (i
= 0; i
< length
; i
++) {
579 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
580 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
581 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
582 ((offset
+ i
) & OTP_ADDR2_10_3
));
584 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
585 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
587 timeout
= jiffies
+ HZ
;
590 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
591 if (time_after(jiffies
, timeout
)) {
592 netdev_warn(dev
->net
,
593 "timeout on OTP_STATUS");
596 } while (buf
& OTP_STATUS_BUSY_
);
598 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
600 data
[i
] = (u8
)(buf
& 0xFF);
606 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
607 u32 length
, u8
*data
)
612 unsigned long timeout
;
614 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
616 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
617 /* clear it and wait to be cleared */
618 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
620 timeout
= jiffies
+ HZ
;
623 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
624 if (time_after(jiffies
, timeout
)) {
625 netdev_warn(dev
->net
,
626 "timeout on OTP_PWR_DN completion");
629 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
632 /* set to BYTE program mode */
633 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
635 for (i
= 0; i
< length
; i
++) {
636 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
637 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
638 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
639 ((offset
+ i
) & OTP_ADDR2_10_3
));
640 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
641 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
642 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
644 timeout
= jiffies
+ HZ
;
647 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
648 if (time_after(jiffies
, timeout
)) {
649 netdev_warn(dev
->net
,
650 "Timeout on OTP_STATUS completion");
653 } while (buf
& OTP_STATUS_BUSY_
);
659 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
660 u32 length
, u8
*data
)
665 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
668 if (sig
== OTP_INDICATOR_1
)
670 else if (sig
== OTP_INDICATOR_2
)
674 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
680 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
684 for (i
= 0; i
< 100; i
++) {
687 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
688 if (unlikely(ret
< 0))
691 if (dp_sel
& DP_SEL_DPRDY_
)
694 usleep_range(40, 100);
697 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
702 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
703 u32 addr
, u32 length
, u32
*buf
)
705 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
709 if (usb_autopm_get_interface(dev
->intf
) < 0)
712 mutex_lock(&pdata
->dataport_mutex
);
714 ret
= lan78xx_dataport_wait_not_busy(dev
);
718 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
720 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
721 dp_sel
|= ram_select
;
722 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
724 for (i
= 0; i
< length
; i
++) {
725 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
727 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
729 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
731 ret
= lan78xx_dataport_wait_not_busy(dev
);
737 mutex_unlock(&pdata
->dataport_mutex
);
738 usb_autopm_put_interface(dev
->intf
);
743 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
744 int index
, u8 addr
[ETH_ALEN
])
748 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
750 temp
= addr
[2] | (temp
<< 8);
751 temp
= addr
[1] | (temp
<< 8);
752 temp
= addr
[0] | (temp
<< 8);
753 pdata
->pfilter_table
[index
][1] = temp
;
755 temp
= addr
[4] | (temp
<< 8);
756 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
757 pdata
->pfilter_table
[index
][0] = temp
;
761 /* returns hash bit number for given MAC address */
762 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
764 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
767 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
769 struct lan78xx_priv
*pdata
=
770 container_of(param
, struct lan78xx_priv
, set_multicast
);
771 struct lan78xx_net
*dev
= pdata
->dev
;
775 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
778 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
779 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
781 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
782 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
783 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
784 pdata
->pfilter_table
[i
][1]);
785 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
786 pdata
->pfilter_table
[i
][0]);
789 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
792 static void lan78xx_set_multicast(struct net_device
*netdev
)
794 struct lan78xx_net
*dev
= netdev_priv(netdev
);
795 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
799 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
801 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
802 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
804 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
805 pdata
->mchash_table
[i
] = 0;
806 /* pfilter_table[0] has own HW address */
807 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
808 pdata
->pfilter_table
[i
][0] =
809 pdata
->pfilter_table
[i
][1] = 0;
812 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
814 if (dev
->net
->flags
& IFF_PROMISC
) {
815 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
816 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
818 if (dev
->net
->flags
& IFF_ALLMULTI
) {
819 netif_dbg(dev
, drv
, dev
->net
,
820 "receive all multicast enabled");
821 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
825 if (netdev_mc_count(dev
->net
)) {
826 struct netdev_hw_addr
*ha
;
829 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
831 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
834 netdev_for_each_mc_addr(ha
, netdev
) {
835 /* set first 32 into Perfect Filter */
837 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
839 u32 bitnum
= lan78xx_hash(ha
->addr
);
841 pdata
->mchash_table
[bitnum
/ 32] |=
842 (1 << (bitnum
% 32));
843 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
849 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
851 /* defer register writes to a sleepable context */
852 schedule_work(&pdata
->set_multicast
);
855 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
856 u16 lcladv
, u16 rmtadv
)
858 u32 flow
= 0, fct_flow
= 0;
861 u8 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
863 if (cap
& FLOW_CTRL_TX
)
864 flow
= (FLOW_CR_TX_FCEN_
| 0xFFFF);
866 if (cap
& FLOW_CTRL_RX
)
867 flow
|= FLOW_CR_RX_FCEN_
;
869 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
871 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
874 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
875 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
876 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
878 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
880 /* threshold value should be set before enabling flow */
881 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
886 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
888 struct phy_device
*phydev
= dev
->net
->phydev
;
889 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
893 /* clear PHY interrupt status */
894 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
895 if (unlikely(ret
< 0))
898 /* clear LAN78xx interrupt status */
899 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
900 if (unlikely(ret
< 0))
903 phy_read_status(phydev
);
905 if (!phydev
->link
&& dev
->link_on
) {
906 dev
->link_on
= false;
907 netif_carrier_off(dev
->net
);
910 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
911 if (unlikely(ret
< 0))
914 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
915 if (unlikely(ret
< 0))
917 } else if (phydev
->link
&& !dev
->link_on
) {
920 phy_ethtool_gset(phydev
, &ecmd
);
922 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
924 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
925 if (ethtool_cmd_speed(&ecmd
) == 1000) {
927 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
928 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
929 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
931 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
932 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
933 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
936 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
937 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
938 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
939 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
943 ladv
= phy_read(phydev
, MII_ADVERTISE
);
947 radv
= phy_read(phydev
, MII_LPA
);
951 netif_dbg(dev
, link
, dev
->net
,
952 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
953 ethtool_cmd_speed(&ecmd
), ecmd
.duplex
, ladv
, radv
);
955 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.duplex
, ladv
, radv
);
956 netif_carrier_on(dev
->net
);
962 /* some work can't be done in tasklets, so we use keventd
964 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
965 * but tasklet_schedule() doesn't. hope the failure is rare.
967 void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
969 set_bit(work
, &dev
->flags
);
970 if (!schedule_delayed_work(&dev
->wq
, 0))
971 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
974 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
978 if (urb
->actual_length
!= 4) {
979 netdev_warn(dev
->net
,
980 "unexpected urb length %d", urb
->actual_length
);
984 memcpy(&intdata
, urb
->transfer_buffer
, 4);
985 le32_to_cpus(&intdata
);
987 if (intdata
& INT_ENP_PHY_INT
) {
988 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
989 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
991 netdev_warn(dev
->net
,
992 "unexpected interrupt: 0x%08x\n", intdata
);
995 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
997 return MAX_EEPROM_SIZE
;
1000 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1001 struct ethtool_eeprom
*ee
, u8
*data
)
1003 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1005 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1007 return lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1010 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1011 struct ethtool_eeprom
*ee
, u8
*data
)
1013 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1015 /* Allow entire eeprom update only */
1016 if ((ee
->magic
== LAN78XX_EEPROM_MAGIC
) &&
1017 (ee
->offset
== 0) &&
1019 (data
[0] == EEPROM_INDICATOR
))
1020 return lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1021 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1022 (ee
->offset
== 0) &&
1024 (data
[0] == OTP_INDICATOR_1
))
1025 return lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
1030 static void lan78xx_get_strings(struct net_device
*netdev
, u32 stringset
,
1033 if (stringset
== ETH_SS_STATS
)
1034 memcpy(data
, lan78xx_gstrings
, sizeof(lan78xx_gstrings
));
1037 static int lan78xx_get_sset_count(struct net_device
*netdev
, int sset
)
1039 if (sset
== ETH_SS_STATS
)
1040 return ARRAY_SIZE(lan78xx_gstrings
);
1045 static void lan78xx_get_stats(struct net_device
*netdev
,
1046 struct ethtool_stats
*stats
, u64
*data
)
1048 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1049 struct lan78xx_statstage lan78xx_stat
;
1053 if (usb_autopm_get_interface(dev
->intf
) < 0)
1056 if (lan78xx_read_stats(dev
, &lan78xx_stat
) > 0) {
1057 p
= (u32
*)&lan78xx_stat
;
1058 for (i
= 0; i
< (sizeof(lan78xx_stat
) / (sizeof(u32
))); i
++)
1062 usb_autopm_put_interface(dev
->intf
);
1065 static void lan78xx_get_wol(struct net_device
*netdev
,
1066 struct ethtool_wolinfo
*wol
)
1068 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1071 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1073 if (usb_autopm_get_interface(dev
->intf
) < 0)
1076 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1077 if (unlikely(ret
< 0)) {
1081 if (buf
& USB_CFG_RMT_WKP_
) {
1082 wol
->supported
= WAKE_ALL
;
1083 wol
->wolopts
= pdata
->wol
;
1090 usb_autopm_put_interface(dev
->intf
);
1093 static int lan78xx_set_wol(struct net_device
*netdev
,
1094 struct ethtool_wolinfo
*wol
)
1096 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1097 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1100 ret
= usb_autopm_get_interface(dev
->intf
);
1105 if (wol
->wolopts
& WAKE_UCAST
)
1106 pdata
->wol
|= WAKE_UCAST
;
1107 if (wol
->wolopts
& WAKE_MCAST
)
1108 pdata
->wol
|= WAKE_MCAST
;
1109 if (wol
->wolopts
& WAKE_BCAST
)
1110 pdata
->wol
|= WAKE_BCAST
;
1111 if (wol
->wolopts
& WAKE_MAGIC
)
1112 pdata
->wol
|= WAKE_MAGIC
;
1113 if (wol
->wolopts
& WAKE_PHY
)
1114 pdata
->wol
|= WAKE_PHY
;
1115 if (wol
->wolopts
& WAKE_ARP
)
1116 pdata
->wol
|= WAKE_ARP
;
1118 device_set_wakeup_enable(&dev
->udev
->dev
, (bool)wol
->wolopts
);
1120 phy_ethtool_set_wol(netdev
->phydev
, wol
);
1122 usb_autopm_put_interface(dev
->intf
);
1127 static int lan78xx_get_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1129 struct lan78xx_net
*dev
= netdev_priv(net
);
1130 struct phy_device
*phydev
= net
->phydev
;
1134 ret
= usb_autopm_get_interface(dev
->intf
);
1138 ret
= phy_ethtool_get_eee(phydev
, edata
);
1142 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1143 if (buf
& MAC_CR_EEE_EN_
) {
1144 edata
->eee_enabled
= true;
1145 edata
->eee_active
= !!(edata
->advertised
&
1146 edata
->lp_advertised
);
1147 edata
->tx_lpi_enabled
= true;
1148 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1149 ret
= lan78xx_read_reg(dev
, EEE_TX_LPI_REQ_DLY
, &buf
);
1150 edata
->tx_lpi_timer
= buf
;
1152 edata
->eee_enabled
= false;
1153 edata
->eee_active
= false;
1154 edata
->tx_lpi_enabled
= false;
1155 edata
->tx_lpi_timer
= 0;
1160 usb_autopm_put_interface(dev
->intf
);
1165 static int lan78xx_set_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1167 struct lan78xx_net
*dev
= netdev_priv(net
);
1171 ret
= usb_autopm_get_interface(dev
->intf
);
1175 if (edata
->eee_enabled
) {
1176 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1177 buf
|= MAC_CR_EEE_EN_
;
1178 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1180 phy_ethtool_set_eee(net
->phydev
, edata
);
1182 buf
= (u32
)edata
->tx_lpi_timer
;
1183 ret
= lan78xx_write_reg(dev
, EEE_TX_LPI_REQ_DLY
, buf
);
1185 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1186 buf
&= ~MAC_CR_EEE_EN_
;
1187 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1190 usb_autopm_put_interface(dev
->intf
);
1195 static u32
lan78xx_get_link(struct net_device
*net
)
1197 phy_read_status(net
->phydev
);
1199 return net
->phydev
->link
;
1202 int lan78xx_nway_reset(struct net_device
*net
)
1204 return phy_start_aneg(net
->phydev
);
1207 static void lan78xx_get_drvinfo(struct net_device
*net
,
1208 struct ethtool_drvinfo
*info
)
1210 struct lan78xx_net
*dev
= netdev_priv(net
);
1212 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1213 strncpy(info
->version
, DRIVER_VERSION
, sizeof(info
->version
));
1214 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
1217 static u32
lan78xx_get_msglevel(struct net_device
*net
)
1219 struct lan78xx_net
*dev
= netdev_priv(net
);
1221 return dev
->msg_enable
;
1224 static void lan78xx_set_msglevel(struct net_device
*net
, u32 level
)
1226 struct lan78xx_net
*dev
= netdev_priv(net
);
1228 dev
->msg_enable
= level
;
1231 static int lan78xx_get_mdix_status(struct net_device
*net
)
1233 struct phy_device
*phydev
= net
->phydev
;
1236 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_1
);
1237 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1238 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_0
);
1243 static void lan78xx_set_mdix_status(struct net_device
*net
, __u8 mdix_ctrl
)
1245 struct lan78xx_net
*dev
= netdev_priv(net
);
1246 struct phy_device
*phydev
= net
->phydev
;
1249 if (mdix_ctrl
== ETH_TP_MDI
) {
1250 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1251 LAN88XX_EXT_PAGE_SPACE_1
);
1252 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1253 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1254 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1255 buf
| LAN88XX_EXT_MODE_CTRL_MDI_
);
1256 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1257 LAN88XX_EXT_PAGE_SPACE_0
);
1258 } else if (mdix_ctrl
== ETH_TP_MDI_X
) {
1259 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1260 LAN88XX_EXT_PAGE_SPACE_1
);
1261 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1262 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1263 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1264 buf
| LAN88XX_EXT_MODE_CTRL_MDI_X_
);
1265 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1266 LAN88XX_EXT_PAGE_SPACE_0
);
1267 } else if (mdix_ctrl
== ETH_TP_MDI_AUTO
) {
1268 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1269 LAN88XX_EXT_PAGE_SPACE_1
);
1270 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1271 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1272 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1273 buf
| LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
);
1274 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1275 LAN88XX_EXT_PAGE_SPACE_0
);
1277 dev
->mdix_ctrl
= mdix_ctrl
;
1280 static int lan78xx_get_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1282 struct lan78xx_net
*dev
= netdev_priv(net
);
1283 struct phy_device
*phydev
= net
->phydev
;
1287 ret
= usb_autopm_get_interface(dev
->intf
);
1291 ret
= phy_ethtool_gset(phydev
, cmd
);
1293 buf
= lan78xx_get_mdix_status(net
);
1295 buf
&= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1296 if (buf
== LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
) {
1297 cmd
->eth_tp_mdix
= ETH_TP_MDI_AUTO
;
1298 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
1299 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_
) {
1300 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
1301 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI
;
1302 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_X_
) {
1303 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
1304 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_X
;
1307 usb_autopm_put_interface(dev
->intf
);
1312 static int lan78xx_set_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1314 struct lan78xx_net
*dev
= netdev_priv(net
);
1315 struct phy_device
*phydev
= net
->phydev
;
1319 ret
= usb_autopm_get_interface(dev
->intf
);
1323 if (dev
->mdix_ctrl
!= cmd
->eth_tp_mdix_ctrl
) {
1324 lan78xx_set_mdix_status(net
, cmd
->eth_tp_mdix_ctrl
);
1327 /* change speed & duplex */
1328 ret
= phy_ethtool_sset(phydev
, cmd
);
1330 if (!cmd
->autoneg
) {
1331 /* force link down */
1332 temp
= phy_read(phydev
, MII_BMCR
);
1333 phy_write(phydev
, MII_BMCR
, temp
| BMCR_LOOPBACK
);
1335 phy_write(phydev
, MII_BMCR
, temp
);
1338 usb_autopm_put_interface(dev
->intf
);
1343 static const struct ethtool_ops lan78xx_ethtool_ops
= {
1344 .get_link
= lan78xx_get_link
,
1345 .nway_reset
= lan78xx_nway_reset
,
1346 .get_drvinfo
= lan78xx_get_drvinfo
,
1347 .get_msglevel
= lan78xx_get_msglevel
,
1348 .set_msglevel
= lan78xx_set_msglevel
,
1349 .get_settings
= lan78xx_get_settings
,
1350 .set_settings
= lan78xx_set_settings
,
1351 .get_eeprom_len
= lan78xx_ethtool_get_eeprom_len
,
1352 .get_eeprom
= lan78xx_ethtool_get_eeprom
,
1353 .set_eeprom
= lan78xx_ethtool_set_eeprom
,
1354 .get_ethtool_stats
= lan78xx_get_stats
,
1355 .get_sset_count
= lan78xx_get_sset_count
,
1356 .get_strings
= lan78xx_get_strings
,
1357 .get_wol
= lan78xx_get_wol
,
1358 .set_wol
= lan78xx_set_wol
,
1359 .get_eee
= lan78xx_get_eee
,
1360 .set_eee
= lan78xx_set_eee
,
1363 static int lan78xx_ioctl(struct net_device
*netdev
, struct ifreq
*rq
, int cmd
)
1365 if (!netif_running(netdev
))
1368 return phy_mii_ioctl(netdev
->phydev
, rq
, cmd
);
1371 static void lan78xx_init_mac_address(struct lan78xx_net
*dev
)
1373 u32 addr_lo
, addr_hi
;
1377 ret
= lan78xx_read_reg(dev
, RX_ADDRL
, &addr_lo
);
1378 ret
= lan78xx_read_reg(dev
, RX_ADDRH
, &addr_hi
);
1380 addr
[0] = addr_lo
& 0xFF;
1381 addr
[1] = (addr_lo
>> 8) & 0xFF;
1382 addr
[2] = (addr_lo
>> 16) & 0xFF;
1383 addr
[3] = (addr_lo
>> 24) & 0xFF;
1384 addr
[4] = addr_hi
& 0xFF;
1385 addr
[5] = (addr_hi
>> 8) & 0xFF;
1387 if (!is_valid_ether_addr(addr
)) {
1388 /* reading mac address from EEPROM or OTP */
1389 if ((lan78xx_read_eeprom(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1391 (lan78xx_read_otp(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1393 if (is_valid_ether_addr(addr
)) {
1394 /* eeprom values are valid so use them */
1395 netif_dbg(dev
, ifup
, dev
->net
,
1396 "MAC address read from EEPROM");
1398 /* generate random MAC */
1399 random_ether_addr(addr
);
1400 netif_dbg(dev
, ifup
, dev
->net
,
1401 "MAC address set to random addr");
1404 addr_lo
= addr
[0] | (addr
[1] << 8) |
1405 (addr
[2] << 16) | (addr
[3] << 24);
1406 addr_hi
= addr
[4] | (addr
[5] << 8);
1408 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1409 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1411 /* generate random MAC */
1412 random_ether_addr(addr
);
1413 netif_dbg(dev
, ifup
, dev
->net
,
1414 "MAC address set to random addr");
1418 ret
= lan78xx_write_reg(dev
, MAF_LO(0), addr_lo
);
1419 ret
= lan78xx_write_reg(dev
, MAF_HI(0), addr_hi
| MAF_HI_VALID_
);
1421 ether_addr_copy(dev
->net
->dev_addr
, addr
);
1424 /* MDIO read and write wrappers for phylib */
1425 static int lan78xx_mdiobus_read(struct mii_bus
*bus
, int phy_id
, int idx
)
1427 struct lan78xx_net
*dev
= bus
->priv
;
1431 ret
= usb_autopm_get_interface(dev
->intf
);
1435 mutex_lock(&dev
->phy_mutex
);
1437 /* confirm MII not busy */
1438 ret
= lan78xx_phy_wait_not_busy(dev
);
1442 /* set the address, index & direction (read from PHY) */
1443 addr
= mii_access(phy_id
, idx
, MII_READ
);
1444 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1446 ret
= lan78xx_phy_wait_not_busy(dev
);
1450 ret
= lan78xx_read_reg(dev
, MII_DATA
, &val
);
1452 ret
= (int)(val
& 0xFFFF);
1455 mutex_unlock(&dev
->phy_mutex
);
1456 usb_autopm_put_interface(dev
->intf
);
1460 static int lan78xx_mdiobus_write(struct mii_bus
*bus
, int phy_id
, int idx
,
1463 struct lan78xx_net
*dev
= bus
->priv
;
1467 ret
= usb_autopm_get_interface(dev
->intf
);
1471 mutex_lock(&dev
->phy_mutex
);
1473 /* confirm MII not busy */
1474 ret
= lan78xx_phy_wait_not_busy(dev
);
1479 ret
= lan78xx_write_reg(dev
, MII_DATA
, val
);
1481 /* set the address, index & direction (write to PHY) */
1482 addr
= mii_access(phy_id
, idx
, MII_WRITE
);
1483 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1485 ret
= lan78xx_phy_wait_not_busy(dev
);
1490 mutex_unlock(&dev
->phy_mutex
);
1491 usb_autopm_put_interface(dev
->intf
);
1495 static int lan78xx_mdio_init(struct lan78xx_net
*dev
)
1500 dev
->mdiobus
= mdiobus_alloc();
1501 if (!dev
->mdiobus
) {
1502 netdev_err(dev
->net
, "can't allocate MDIO bus\n");
1506 dev
->mdiobus
->priv
= (void *)dev
;
1507 dev
->mdiobus
->read
= lan78xx_mdiobus_read
;
1508 dev
->mdiobus
->write
= lan78xx_mdiobus_write
;
1509 dev
->mdiobus
->name
= "lan78xx-mdiobus";
1511 snprintf(dev
->mdiobus
->id
, MII_BUS_ID_SIZE
, "usb-%03d:%03d",
1512 dev
->udev
->bus
->busnum
, dev
->udev
->devnum
);
1514 /* handle our own interrupt */
1515 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1516 dev
->mdiobus
->irq
[i
] = PHY_IGNORE_INTERRUPT
;
1518 switch (dev
->devid
& ID_REV_CHIP_ID_MASK_
) {
1521 /* set to internal PHY id */
1522 dev
->mdiobus
->phy_mask
= ~(1 << 1);
1526 ret
= mdiobus_register(dev
->mdiobus
);
1528 netdev_err(dev
->net
, "can't register MDIO bus\n");
1532 netdev_dbg(dev
->net
, "registered mdiobus bus %s\n", dev
->mdiobus
->id
);
1535 mdiobus_free(dev
->mdiobus
);
1539 static void lan78xx_remove_mdio(struct lan78xx_net
*dev
)
1541 mdiobus_unregister(dev
->mdiobus
);
1542 mdiobus_free(dev
->mdiobus
);
/* phylib link-change callback; link handling is done via the interrupt
 * endpoint / EVENT_LINK_RESET path instead, so nothing to do here.
 * NOTE(review): body not visible in the provided chunk — assumed empty.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do - link events are handled elsewhere */
}
1550 static int lan78xx_phy_init(struct lan78xx_net
*dev
)
1553 struct phy_device
*phydev
= dev
->net
->phydev
;
1555 phydev
= phy_find_first(dev
->mdiobus
);
1557 netdev_err(dev
->net
, "no PHY found\n");
1561 ret
= phy_connect_direct(dev
->net
, phydev
,
1562 lan78xx_link_status_change
,
1563 PHY_INTERFACE_MODE_GMII
);
1565 netdev_err(dev
->net
, "can't attach PHY to %s\n",
1570 /* set to AUTOMDIX */
1571 lan78xx_set_mdix_status(dev
->net
, ETH_TP_MDI_AUTO
);
1573 /* MAC doesn't support 1000T Half */
1574 phydev
->supported
&= ~SUPPORTED_1000baseT_Half
;
1575 phydev
->supported
|= (SUPPORTED_10baseT_Half
|
1576 SUPPORTED_10baseT_Full
|
1577 SUPPORTED_100baseT_Half
|
1578 SUPPORTED_100baseT_Full
|
1579 SUPPORTED_1000baseT_Full
|
1580 SUPPORTED_Pause
| SUPPORTED_Asym_Pause
);
1581 genphy_config_aneg(phydev
);
1583 /* Workaround to enable PHY interrupt.
1584 * phy_start_interrupts() is API for requesting and enabling
1585 * PHY interrupt. However, USB-to-Ethernet device can't use
1586 * request_irq() called in phy_start_interrupts().
1587 * Set PHY to PHY_HALTED and call phy_start()
1588 * to make a call to phy_enable_interrupts()
1593 netif_dbg(dev
, ifup
, dev
->net
, "phy initialised successfully");
1598 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net
*dev
, int size
)
1604 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
1606 rxenabled
= ((buf
& MAC_RX_RXEN_
) != 0);
1609 buf
&= ~MAC_RX_RXEN_
;
1610 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1613 /* add 4 to size for FCS */
1614 buf
&= ~MAC_RX_MAX_SIZE_MASK_
;
1615 buf
|= (((size
+ 4) << MAC_RX_MAX_SIZE_SHIFT_
) & MAC_RX_MAX_SIZE_MASK_
);
1617 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1620 buf
|= MAC_RX_RXEN_
;
1621 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1627 static int unlink_urbs(struct lan78xx_net
*dev
, struct sk_buff_head
*q
)
1629 struct sk_buff
*skb
;
1630 unsigned long flags
;
1633 spin_lock_irqsave(&q
->lock
, flags
);
1634 while (!skb_queue_empty(q
)) {
1635 struct skb_data
*entry
;
1639 skb_queue_walk(q
, skb
) {
1640 entry
= (struct skb_data
*)skb
->cb
;
1641 if (entry
->state
!= unlink_start
)
1646 entry
->state
= unlink_start
;
1649 /* Get reference count of the URB to avoid it to be
1650 * freed during usb_unlink_urb, which may trigger
1651 * use-after-free problem inside usb_unlink_urb since
1652 * usb_unlink_urb is always racing with .complete
1653 * handler(include defer_bh).
1656 spin_unlock_irqrestore(&q
->lock
, flags
);
1657 /* during some PM-driven resume scenarios,
1658 * these (async) unlinks complete immediately
1660 ret
= usb_unlink_urb(urb
);
1661 if (ret
!= -EINPROGRESS
&& ret
!= 0)
1662 netdev_dbg(dev
->net
, "unlink urb err, %d\n", ret
);
1666 spin_lock_irqsave(&q
->lock
, flags
);
1668 spin_unlock_irqrestore(&q
->lock
, flags
);
1672 static int lan78xx_change_mtu(struct net_device
*netdev
, int new_mtu
)
1674 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1675 int ll_mtu
= new_mtu
+ netdev
->hard_header_len
;
1676 int old_hard_mtu
= dev
->hard_mtu
;
1677 int old_rx_urb_size
= dev
->rx_urb_size
;
1680 if (new_mtu
> MAX_SINGLE_PACKET_SIZE
)
1685 /* no second zero-length packet read wanted after mtu-sized packets */
1686 if ((ll_mtu
% dev
->maxpacket
) == 0)
1689 ret
= lan78xx_set_rx_max_frame_length(dev
, new_mtu
+ ETH_HLEN
);
1691 netdev
->mtu
= new_mtu
;
1693 dev
->hard_mtu
= netdev
->mtu
+ netdev
->hard_header_len
;
1694 if (dev
->rx_urb_size
== old_hard_mtu
) {
1695 dev
->rx_urb_size
= dev
->hard_mtu
;
1696 if (dev
->rx_urb_size
> old_rx_urb_size
) {
1697 if (netif_running(dev
->net
)) {
1698 unlink_urbs(dev
, &dev
->rxq
);
1699 tasklet_schedule(&dev
->bh
);
1707 int lan78xx_set_mac_addr(struct net_device
*netdev
, void *p
)
1709 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1710 struct sockaddr
*addr
= p
;
1711 u32 addr_lo
, addr_hi
;
1714 if (netif_running(netdev
))
1717 if (!is_valid_ether_addr(addr
->sa_data
))
1718 return -EADDRNOTAVAIL
;
1720 ether_addr_copy(netdev
->dev_addr
, addr
->sa_data
);
1722 addr_lo
= netdev
->dev_addr
[0] |
1723 netdev
->dev_addr
[1] << 8 |
1724 netdev
->dev_addr
[2] << 16 |
1725 netdev
->dev_addr
[3] << 24;
1726 addr_hi
= netdev
->dev_addr
[4] |
1727 netdev
->dev_addr
[5] << 8;
1729 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1730 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1735 /* Enable or disable Rx checksum offload engine */
1736 static int lan78xx_set_features(struct net_device
*netdev
,
1737 netdev_features_t features
)
1739 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1740 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1741 unsigned long flags
;
1744 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
1746 if (features
& NETIF_F_RXCSUM
) {
1747 pdata
->rfe_ctl
|= RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
;
1748 pdata
->rfe_ctl
|= RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
;
1750 pdata
->rfe_ctl
&= ~(RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
);
1751 pdata
->rfe_ctl
&= ~(RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
);
1754 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1755 pdata
->rfe_ctl
|= RFE_CTL_VLAN_FILTER_
;
1757 pdata
->rfe_ctl
&= ~RFE_CTL_VLAN_FILTER_
;
1759 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
1761 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1766 static void lan78xx_deferred_vlan_write(struct work_struct
*param
)
1768 struct lan78xx_priv
*pdata
=
1769 container_of(param
, struct lan78xx_priv
, set_vlan
);
1770 struct lan78xx_net
*dev
= pdata
->dev
;
1772 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, 0,
1773 DP_SEL_VHF_VLAN_LEN
, pdata
->vlan_table
);
1776 static int lan78xx_vlan_rx_add_vid(struct net_device
*netdev
,
1777 __be16 proto
, u16 vid
)
1779 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1780 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1782 u16 vid_dword_index
;
1784 vid_dword_index
= (vid
>> 5) & 0x7F;
1785 vid_bit_index
= vid
& 0x1F;
1787 pdata
->vlan_table
[vid_dword_index
] |= (1 << vid_bit_index
);
1789 /* defer register writes to a sleepable context */
1790 schedule_work(&pdata
->set_vlan
);
1795 static int lan78xx_vlan_rx_kill_vid(struct net_device
*netdev
,
1796 __be16 proto
, u16 vid
)
1798 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1799 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1801 u16 vid_dword_index
;
1803 vid_dword_index
= (vid
>> 5) & 0x7F;
1804 vid_bit_index
= vid
& 0x1F;
1806 pdata
->vlan_table
[vid_dword_index
] &= ~(1 << vid_bit_index
);
1808 /* defer register writes to a sleepable context */
1809 schedule_work(&pdata
->set_vlan
);
1814 static void lan78xx_init_ltm(struct lan78xx_net
*dev
)
1818 u32 regs
[6] = { 0 };
1820 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1821 if (buf
& USB_CFG1_LTM_ENABLE_
) {
1823 /* Get values from EEPROM first */
1824 if (lan78xx_read_eeprom(dev
, 0x3F, 2, temp
) == 0) {
1825 if (temp
[0] == 24) {
1826 ret
= lan78xx_read_raw_eeprom(dev
,
1833 } else if (lan78xx_read_otp(dev
, 0x3F, 2, temp
) == 0) {
1834 if (temp
[0] == 24) {
1835 ret
= lan78xx_read_raw_otp(dev
,
1845 lan78xx_write_reg(dev
, LTM_BELT_IDLE0
, regs
[0]);
1846 lan78xx_write_reg(dev
, LTM_BELT_IDLE1
, regs
[1]);
1847 lan78xx_write_reg(dev
, LTM_BELT_ACT0
, regs
[2]);
1848 lan78xx_write_reg(dev
, LTM_BELT_ACT1
, regs
[3]);
1849 lan78xx_write_reg(dev
, LTM_INACTIVE0
, regs
[4]);
1850 lan78xx_write_reg(dev
, LTM_INACTIVE1
, regs
[5]);
1853 static int lan78xx_reset(struct lan78xx_net
*dev
)
1855 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1858 unsigned long timeout
;
1860 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1861 buf
|= HW_CFG_LRST_
;
1862 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
1864 timeout
= jiffies
+ HZ
;
1867 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1868 if (time_after(jiffies
, timeout
)) {
1869 netdev_warn(dev
->net
,
1870 "timeout on completion of LiteReset");
1873 } while (buf
& HW_CFG_LRST_
);
1875 lan78xx_init_mac_address(dev
);
1877 /* save DEVID for later usage */
1878 ret
= lan78xx_read_reg(dev
, ID_REV
, &buf
);
1881 /* Respond to the IN token with a NAK */
1882 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1883 buf
|= USB_CFG_BIR_
;
1884 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
1887 lan78xx_init_ltm(dev
);
1889 dev
->net
->hard_header_len
+= TX_OVERHEAD
;
1890 dev
->hard_mtu
= dev
->net
->mtu
+ dev
->net
->hard_header_len
;
1892 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
1893 buf
= DEFAULT_BURST_CAP_SIZE
/ SS_USB_PKT_SIZE
;
1894 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1897 } else if (dev
->udev
->speed
== USB_SPEED_HIGH
) {
1898 buf
= DEFAULT_BURST_CAP_SIZE
/ HS_USB_PKT_SIZE
;
1899 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1900 dev
->rx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->rx_urb_size
;
1901 dev
->tx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->hard_mtu
;
1903 buf
= DEFAULT_BURST_CAP_SIZE
/ FS_USB_PKT_SIZE
;
1904 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1908 ret
= lan78xx_write_reg(dev
, BURST_CAP
, buf
);
1909 ret
= lan78xx_write_reg(dev
, BULK_IN_DLY
, DEFAULT_BULK_IN_DELAY
);
1911 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1913 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
1915 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1916 buf
|= USB_CFG_BCE_
;
1917 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
1919 /* set FIFO sizes */
1920 buf
= (MAX_RX_FIFO_SIZE
- 512) / 512;
1921 ret
= lan78xx_write_reg(dev
, FCT_RX_FIFO_END
, buf
);
1923 buf
= (MAX_TX_FIFO_SIZE
- 512) / 512;
1924 ret
= lan78xx_write_reg(dev
, FCT_TX_FIFO_END
, buf
);
1926 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_CLEAR_ALL_
);
1927 ret
= lan78xx_write_reg(dev
, FLOW
, 0);
1928 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, 0);
1930 /* Don't need rfe_ctl_lock during initialisation */
1931 ret
= lan78xx_read_reg(dev
, RFE_CTL
, &pdata
->rfe_ctl
);
1932 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
| RFE_CTL_DA_PERFECT_
;
1933 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1935 /* Enable or disable checksum offload engines */
1936 lan78xx_set_features(dev
->net
, dev
->net
->features
);
1938 lan78xx_set_multicast(dev
->net
);
1941 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
1942 buf
|= PMT_CTL_PHY_RST_
;
1943 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
1945 timeout
= jiffies
+ HZ
;
1948 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
1949 if (time_after(jiffies
, timeout
)) {
1950 netdev_warn(dev
->net
, "timeout waiting for PHY Reset");
1953 } while ((buf
& PMT_CTL_PHY_RST_
) || !(buf
& PMT_CTL_READY_
));
1955 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1956 buf
|= MAC_CR_AUTO_DUPLEX_
| MAC_CR_AUTO_SPEED_
;
1957 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1959 /* enable PHY interrupts */
1960 ret
= lan78xx_read_reg(dev
, INT_EP_CTL
, &buf
);
1961 buf
|= INT_ENP_PHY_INT
;
1962 ret
= lan78xx_write_reg(dev
, INT_EP_CTL
, buf
);
1964 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
1965 buf
|= MAC_TX_TXEN_
;
1966 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
1968 ret
= lan78xx_read_reg(dev
, FCT_TX_CTL
, &buf
);
1969 buf
|= FCT_TX_CTL_EN_
;
1970 ret
= lan78xx_write_reg(dev
, FCT_TX_CTL
, buf
);
1972 ret
= lan78xx_set_rx_max_frame_length(dev
, dev
->net
->mtu
+ ETH_HLEN
);
1974 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
1975 buf
|= MAC_RX_RXEN_
;
1976 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1978 ret
= lan78xx_read_reg(dev
, FCT_RX_CTL
, &buf
);
1979 buf
|= FCT_RX_CTL_EN_
;
1980 ret
= lan78xx_write_reg(dev
, FCT_RX_CTL
, buf
);
1985 static int lan78xx_open(struct net_device
*net
)
1987 struct lan78xx_net
*dev
= netdev_priv(net
);
1990 ret
= usb_autopm_get_interface(dev
->intf
);
1994 ret
= lan78xx_reset(dev
);
1998 ret
= lan78xx_phy_init(dev
);
2002 /* for Link Check */
2003 if (dev
->urb_intr
) {
2004 ret
= usb_submit_urb(dev
->urb_intr
, GFP_KERNEL
);
2006 netif_err(dev
, ifup
, dev
->net
,
2007 "intr submit %d\n", ret
);
2012 set_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2014 netif_start_queue(net
);
2016 dev
->link_on
= false;
2018 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
2020 usb_autopm_put_interface(dev
->intf
);
2026 static void lan78xx_terminate_urbs(struct lan78xx_net
*dev
)
2028 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup
);
2029 DECLARE_WAITQUEUE(wait
, current
);
2032 /* ensure there are no more active urbs */
2033 add_wait_queue(&unlink_wakeup
, &wait
);
2034 set_current_state(TASK_UNINTERRUPTIBLE
);
2035 dev
->wait
= &unlink_wakeup
;
2036 temp
= unlink_urbs(dev
, &dev
->txq
) + unlink_urbs(dev
, &dev
->rxq
);
2038 /* maybe wait for deletions to finish. */
2039 while (!skb_queue_empty(&dev
->rxq
) &&
2040 !skb_queue_empty(&dev
->txq
) &&
2041 !skb_queue_empty(&dev
->done
)) {
2042 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS
));
2043 set_current_state(TASK_UNINTERRUPTIBLE
);
2044 netif_dbg(dev
, ifdown
, dev
->net
,
2045 "waited for %d urb completions\n", temp
);
2047 set_current_state(TASK_RUNNING
);
2049 remove_wait_queue(&unlink_wakeup
, &wait
);
2052 int lan78xx_stop(struct net_device
*net
)
2054 struct lan78xx_net
*dev
= netdev_priv(net
);
2056 phy_stop(net
->phydev
);
2057 phy_disconnect(net
->phydev
);
2060 clear_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2061 netif_stop_queue(net
);
2063 netif_info(dev
, ifdown
, dev
->net
,
2064 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2065 net
->stats
.rx_packets
, net
->stats
.tx_packets
,
2066 net
->stats
.rx_errors
, net
->stats
.tx_errors
);
2068 lan78xx_terminate_urbs(dev
);
2070 usb_kill_urb(dev
->urb_intr
);
2072 skb_queue_purge(&dev
->rxq_pause
);
2074 /* deferred work (task, timer, softirq) must also stop.
2075 * can't flush_scheduled_work() until we drop rtnl (later),
2076 * else workers could deadlock; so make workers a NOP.
2079 cancel_delayed_work_sync(&dev
->wq
);
2080 tasklet_kill(&dev
->bh
);
2082 usb_autopm_put_interface(dev
->intf
);
/* Collapse a fragmented skb into a single linear buffer for bulk-out. */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2092 static struct sk_buff
*lan78xx_tx_prep(struct lan78xx_net
*dev
,
2093 struct sk_buff
*skb
, gfp_t flags
)
2095 u32 tx_cmd_a
, tx_cmd_b
;
2097 if (skb_headroom(skb
) < TX_OVERHEAD
) {
2098 struct sk_buff
*skb2
;
2100 skb2
= skb_copy_expand(skb
, TX_OVERHEAD
, 0, flags
);
2101 dev_kfree_skb_any(skb
);
2107 if (lan78xx_linearize(skb
) < 0)
2110 tx_cmd_a
= (u32
)(skb
->len
& TX_CMD_A_LEN_MASK_
) | TX_CMD_A_FCS_
;
2112 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2113 tx_cmd_a
|= TX_CMD_A_IPE_
| TX_CMD_A_TPE_
;
2116 if (skb_is_gso(skb
)) {
2117 u16 mss
= max(skb_shinfo(skb
)->gso_size
, TX_CMD_B_MSS_MIN_
);
2119 tx_cmd_b
= (mss
<< TX_CMD_B_MSS_SHIFT_
) & TX_CMD_B_MSS_MASK_
;
2121 tx_cmd_a
|= TX_CMD_A_LSO_
;
2124 if (skb_vlan_tag_present(skb
)) {
2125 tx_cmd_a
|= TX_CMD_A_IVTG_
;
2126 tx_cmd_b
|= skb_vlan_tag_get(skb
) & TX_CMD_B_VTAG_MASK_
;
2130 cpu_to_le32s(&tx_cmd_b
);
2131 memcpy(skb
->data
, &tx_cmd_b
, 4);
2134 cpu_to_le32s(&tx_cmd_a
);
2135 memcpy(skb
->data
, &tx_cmd_a
, 4);
2140 static enum skb_state
defer_bh(struct lan78xx_net
*dev
, struct sk_buff
*skb
,
2141 struct sk_buff_head
*list
, enum skb_state state
)
2143 unsigned long flags
;
2144 enum skb_state old_state
;
2145 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2147 spin_lock_irqsave(&list
->lock
, flags
);
2148 old_state
= entry
->state
;
2149 entry
->state
= state
;
2151 __skb_unlink(skb
, list
);
2152 spin_unlock(&list
->lock
);
2153 spin_lock(&dev
->done
.lock
);
2155 __skb_queue_tail(&dev
->done
, skb
);
2156 if (skb_queue_len(&dev
->done
) == 1)
2157 tasklet_schedule(&dev
->bh
);
2158 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
2163 static void tx_complete(struct urb
*urb
)
2165 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2166 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2167 struct lan78xx_net
*dev
= entry
->dev
;
2169 if (urb
->status
== 0) {
2170 dev
->net
->stats
.tx_packets
++;
2171 dev
->net
->stats
.tx_bytes
+= entry
->length
;
2173 dev
->net
->stats
.tx_errors
++;
2175 switch (urb
->status
) {
2177 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2180 /* software-driven interface shutdown */
2188 netif_stop_queue(dev
->net
);
2191 netif_dbg(dev
, tx_err
, dev
->net
,
2192 "tx err %d\n", entry
->urb
->status
);
2197 usb_autopm_put_interface_async(dev
->intf
);
2199 defer_bh(dev
, skb
, &dev
->txq
, tx_done
);
2202 static void lan78xx_queue_skb(struct sk_buff_head
*list
,
2203 struct sk_buff
*newsk
, enum skb_state state
)
2205 struct skb_data
*entry
= (struct skb_data
*)newsk
->cb
;
2207 __skb_queue_tail(list
, newsk
);
2208 entry
->state
= state
;
2211 netdev_tx_t
lan78xx_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2213 struct lan78xx_net
*dev
= netdev_priv(net
);
2214 struct sk_buff
*skb2
= NULL
;
2217 skb_tx_timestamp(skb
);
2218 skb2
= lan78xx_tx_prep(dev
, skb
, GFP_ATOMIC
);
2222 skb_queue_tail(&dev
->txq_pend
, skb2
);
2224 if (skb_queue_len(&dev
->txq_pend
) > 10)
2225 netif_stop_queue(net
);
2227 netif_dbg(dev
, tx_err
, dev
->net
,
2228 "lan78xx_tx_prep return NULL\n");
2229 dev
->net
->stats
.tx_errors
++;
2230 dev
->net
->stats
.tx_dropped
++;
2233 tasklet_schedule(&dev
->bh
);
2235 return NETDEV_TX_OK
;
2238 int lan78xx_get_endpoints(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2241 struct usb_host_interface
*alt
= NULL
;
2242 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
2243 struct usb_host_endpoint
*status
= NULL
;
2245 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
2251 alt
= intf
->altsetting
+ tmp
;
2253 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
2254 struct usb_host_endpoint
*e
;
2257 e
= alt
->endpoint
+ ep
;
2258 switch (e
->desc
.bmAttributes
) {
2259 case USB_ENDPOINT_XFER_INT
:
2260 if (!usb_endpoint_dir_in(&e
->desc
))
2264 case USB_ENDPOINT_XFER_BULK
:
2269 if (usb_endpoint_dir_in(&e
->desc
)) {
2272 else if (intr
&& !status
)
2282 if (!alt
|| !in
|| !out
)
2285 dev
->pipe_in
= usb_rcvbulkpipe(dev
->udev
,
2286 in
->desc
.bEndpointAddress
&
2287 USB_ENDPOINT_NUMBER_MASK
);
2288 dev
->pipe_out
= usb_sndbulkpipe(dev
->udev
,
2289 out
->desc
.bEndpointAddress
&
2290 USB_ENDPOINT_NUMBER_MASK
);
2291 dev
->ep_intr
= status
;
2296 static int lan78xx_bind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2298 struct lan78xx_priv
*pdata
= NULL
;
2302 ret
= lan78xx_get_endpoints(dev
, intf
);
2304 dev
->data
[0] = (unsigned long)kzalloc(sizeof(*pdata
), GFP_KERNEL
);
2306 pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2308 netdev_warn(dev
->net
, "Unable to allocate lan78xx_priv");
2314 spin_lock_init(&pdata
->rfe_ctl_lock
);
2315 mutex_init(&pdata
->dataport_mutex
);
2317 INIT_WORK(&pdata
->set_multicast
, lan78xx_deferred_multicast_write
);
2319 for (i
= 0; i
< DP_SEL_VHF_VLAN_LEN
; i
++)
2320 pdata
->vlan_table
[i
] = 0;
2322 INIT_WORK(&pdata
->set_vlan
, lan78xx_deferred_vlan_write
);
2324 dev
->net
->features
= 0;
2326 if (DEFAULT_TX_CSUM_ENABLE
)
2327 dev
->net
->features
|= NETIF_F_HW_CSUM
;
2329 if (DEFAULT_RX_CSUM_ENABLE
)
2330 dev
->net
->features
|= NETIF_F_RXCSUM
;
2332 if (DEFAULT_TSO_CSUM_ENABLE
)
2333 dev
->net
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_SG
;
2335 dev
->net
->hw_features
= dev
->net
->features
;
2337 /* Init all registers */
2338 ret
= lan78xx_reset(dev
);
2340 lan78xx_mdio_init(dev
);
2342 dev
->net
->flags
|= IFF_MULTICAST
;
2344 pdata
->wol
= WAKE_MAGIC
;
2349 static void lan78xx_unbind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2351 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2353 lan78xx_remove_mdio(dev
);
2356 netif_dbg(dev
, ifdown
, dev
->net
, "free pdata");
2363 static void lan78xx_rx_csum_offload(struct lan78xx_net
*dev
,
2364 struct sk_buff
*skb
,
2365 u32 rx_cmd_a
, u32 rx_cmd_b
)
2367 if (!(dev
->net
->features
& NETIF_F_RXCSUM
) ||
2368 unlikely(rx_cmd_a
& RX_CMD_A_ICSM_
)) {
2369 skb
->ip_summed
= CHECKSUM_NONE
;
2371 skb
->csum
= ntohs((u16
)(rx_cmd_b
>> RX_CMD_B_CSUM_SHIFT_
));
2372 skb
->ip_summed
= CHECKSUM_COMPLETE
;
2376 void lan78xx_skb_return(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2380 if (test_bit(EVENT_RX_PAUSED
, &dev
->flags
)) {
2381 skb_queue_tail(&dev
->rxq_pause
, skb
);
2385 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
2386 dev
->net
->stats
.rx_packets
++;
2387 dev
->net
->stats
.rx_bytes
+= skb
->len
;
2389 netif_dbg(dev
, rx_status
, dev
->net
, "< rx, len %zu, type 0x%x\n",
2390 skb
->len
+ sizeof(struct ethhdr
), skb
->protocol
);
2391 memset(skb
->cb
, 0, sizeof(struct skb_data
));
2393 if (skb_defer_rx_timestamp(skb
))
2396 status
= netif_rx(skb
);
2397 if (status
!= NET_RX_SUCCESS
)
2398 netif_dbg(dev
, rx_err
, dev
->net
,
2399 "netif_rx status %d\n", status
);
2402 static int lan78xx_rx(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2404 if (skb
->len
< dev
->net
->hard_header_len
)
2407 while (skb
->len
> 0) {
2408 u32 rx_cmd_a
, rx_cmd_b
, align_count
, size
;
2410 struct sk_buff
*skb2
;
2411 unsigned char *packet
;
2413 memcpy(&rx_cmd_a
, skb
->data
, sizeof(rx_cmd_a
));
2414 le32_to_cpus(&rx_cmd_a
);
2415 skb_pull(skb
, sizeof(rx_cmd_a
));
2417 memcpy(&rx_cmd_b
, skb
->data
, sizeof(rx_cmd_b
));
2418 le32_to_cpus(&rx_cmd_b
);
2419 skb_pull(skb
, sizeof(rx_cmd_b
));
2421 memcpy(&rx_cmd_c
, skb
->data
, sizeof(rx_cmd_c
));
2422 le16_to_cpus(&rx_cmd_c
);
2423 skb_pull(skb
, sizeof(rx_cmd_c
));
2427 /* get the packet length */
2428 size
= (rx_cmd_a
& RX_CMD_A_LEN_MASK_
);
2429 align_count
= (4 - ((size
+ RXW_PADDING
) % 4)) % 4;
2431 if (unlikely(rx_cmd_a
& RX_CMD_A_RED_
)) {
2432 netif_dbg(dev
, rx_err
, dev
->net
,
2433 "Error rx_cmd_a=0x%08x", rx_cmd_a
);
2435 /* last frame in this batch */
2436 if (skb
->len
== size
) {
2437 lan78xx_rx_csum_offload(dev
, skb
,
2438 rx_cmd_a
, rx_cmd_b
);
2440 skb_trim(skb
, skb
->len
- 4); /* remove fcs */
2441 skb
->truesize
= size
+ sizeof(struct sk_buff
);
2446 skb2
= skb_clone(skb
, GFP_ATOMIC
);
2447 if (unlikely(!skb2
)) {
2448 netdev_warn(dev
->net
, "Error allocating skb");
2453 skb2
->data
= packet
;
2454 skb_set_tail_pointer(skb2
, size
);
2456 lan78xx_rx_csum_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
2458 skb_trim(skb2
, skb2
->len
- 4); /* remove fcs */
2459 skb2
->truesize
= size
+ sizeof(struct sk_buff
);
2461 lan78xx_skb_return(dev
, skb2
);
2464 skb_pull(skb
, size
);
2466 /* padding bytes before the next frame starts */
2468 skb_pull(skb
, align_count
);
2474 static inline void rx_process(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2476 if (!lan78xx_rx(dev
, skb
)) {
2477 dev
->net
->stats
.rx_errors
++;
2482 lan78xx_skb_return(dev
, skb
);
2486 netif_dbg(dev
, rx_err
, dev
->net
, "drop\n");
2487 dev
->net
->stats
.rx_errors
++;
2489 skb_queue_tail(&dev
->done
, skb
);
2492 static void rx_complete(struct urb
*urb
);
2494 static int rx_submit(struct lan78xx_net
*dev
, struct urb
*urb
, gfp_t flags
)
2496 struct sk_buff
*skb
;
2497 struct skb_data
*entry
;
2498 unsigned long lockflags
;
2499 size_t size
= dev
->rx_urb_size
;
2502 skb
= netdev_alloc_skb_ip_align(dev
->net
, size
);
2508 entry
= (struct skb_data
*)skb
->cb
;
2513 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_in
,
2514 skb
->data
, size
, rx_complete
, skb
);
2516 spin_lock_irqsave(&dev
->rxq
.lock
, lockflags
);
2518 if (netif_device_present(dev
->net
) &&
2519 netif_running(dev
->net
) &&
2520 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2521 !test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2522 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
2525 lan78xx_queue_skb(&dev
->rxq
, skb
, rx_start
);
2528 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2531 netif_dbg(dev
, ifdown
, dev
->net
, "device gone\n");
2532 netif_device_detach(dev
->net
);
2538 netif_dbg(dev
, rx_err
, dev
->net
,
2539 "rx submit, %d\n", ret
);
2540 tasklet_schedule(&dev
->bh
);
2543 netif_dbg(dev
, ifdown
, dev
->net
, "rx: stopped\n");
2546 spin_unlock_irqrestore(&dev
->rxq
.lock
, lockflags
);
2548 dev_kfree_skb_any(skb
);
2554 static void rx_complete(struct urb
*urb
)
2556 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2557 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2558 struct lan78xx_net
*dev
= entry
->dev
;
2559 int urb_status
= urb
->status
;
2560 enum skb_state state
;
2562 skb_put(skb
, urb
->actual_length
);
2566 switch (urb_status
) {
2568 if (skb
->len
< dev
->net
->hard_header_len
) {
2570 dev
->net
->stats
.rx_errors
++;
2571 dev
->net
->stats
.rx_length_errors
++;
2572 netif_dbg(dev
, rx_err
, dev
->net
,
2573 "rx length %d\n", skb
->len
);
2575 usb_mark_last_busy(dev
->udev
);
2578 dev
->net
->stats
.rx_errors
++;
2579 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2581 case -ECONNRESET
: /* async unlink */
2582 case -ESHUTDOWN
: /* hardware gone */
2583 netif_dbg(dev
, ifdown
, dev
->net
,
2584 "rx shutdown, code %d\n", urb_status
);
2592 dev
->net
->stats
.rx_errors
++;
2598 /* data overrun ... flush fifo? */
2600 dev
->net
->stats
.rx_over_errors
++;
2605 dev
->net
->stats
.rx_errors
++;
2606 netif_dbg(dev
, rx_err
, dev
->net
, "rx status %d\n", urb_status
);
2610 state
= defer_bh(dev
, skb
, &dev
->rxq
, state
);
2613 if (netif_running(dev
->net
) &&
2614 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2615 state
!= unlink_start
) {
2616 rx_submit(dev
, urb
, GFP_ATOMIC
);
2621 netif_dbg(dev
, rx_err
, dev
->net
, "no read resubmitted\n");
2624 static void lan78xx_tx_bh(struct lan78xx_net
*dev
)
2627 struct urb
*urb
= NULL
;
2628 struct skb_data
*entry
;
2629 unsigned long flags
;
2630 struct sk_buff_head
*tqp
= &dev
->txq_pend
;
2631 struct sk_buff
*skb
, *skb2
;
2634 int skb_totallen
, pkt_cnt
;
2638 for (skb
= tqp
->next
; pkt_cnt
< tqp
->qlen
; skb
= skb
->next
) {
2639 if (skb_is_gso(skb
)) {
2641 /* handle previous packets first */
2645 skb2
= skb_dequeue(tqp
);
2649 if ((skb_totallen
+ skb
->len
) > MAX_SINGLE_PACKET_SIZE
)
2651 skb_totallen
= skb
->len
+ roundup(skb_totallen
, sizeof(u32
));
2655 /* copy to a single skb */
2656 skb
= alloc_skb(skb_totallen
, GFP_ATOMIC
);
2660 skb_put(skb
, skb_totallen
);
2662 for (count
= pos
= 0; count
< pkt_cnt
; count
++) {
2663 skb2
= skb_dequeue(tqp
);
2665 memcpy(skb
->data
+ pos
, skb2
->data
, skb2
->len
);
2666 pos
+= roundup(skb2
->len
, sizeof(u32
));
2667 dev_kfree_skb(skb2
);
2671 length
= skb_totallen
;
2674 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
2676 netif_dbg(dev
, tx_err
, dev
->net
, "no urb\n");
2680 entry
= (struct skb_data
*)skb
->cb
;
2683 entry
->length
= length
;
2685 spin_lock_irqsave(&dev
->txq
.lock
, flags
);
2686 ret
= usb_autopm_get_interface_async(dev
->intf
);
2688 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2692 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_out
,
2693 skb
->data
, skb
->len
, tx_complete
, skb
);
2695 if (length
% dev
->maxpacket
== 0) {
2696 /* send USB_ZERO_PACKET */
2697 urb
->transfer_flags
|= URB_ZERO_PACKET
;
2701 /* if this triggers the device is still a sleep */
2702 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2703 /* transmission will be done in resume */
2704 usb_anchor_urb(urb
, &dev
->deferred
);
2705 /* no use to process more packets */
2706 netif_stop_queue(dev
->net
);
2708 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2709 netdev_dbg(dev
->net
, "Delaying transmission for resumption\n");
2714 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
2717 dev
->net
->trans_start
= jiffies
;
2718 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
2719 if (skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
)
2720 netif_stop_queue(dev
->net
);
2723 netif_stop_queue(dev
->net
);
2724 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2725 usb_autopm_put_interface_async(dev
->intf
);
2728 usb_autopm_put_interface_async(dev
->intf
);
2729 netif_dbg(dev
, tx_err
, dev
->net
,
2730 "tx: submit urb err %d\n", ret
);
2734 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2737 netif_dbg(dev
, tx_err
, dev
->net
, "drop, code %d\n", ret
);
2739 dev
->net
->stats
.tx_dropped
++;
2741 dev_kfree_skb_any(skb
);
2744 netif_dbg(dev
, tx_queued
, dev
->net
,
2745 "> tx, len %d, type 0x%x\n", length
, skb
->protocol
);
2748 static void lan78xx_rx_bh(struct lan78xx_net
*dev
)
2753 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
) {
2754 for (i
= 0; i
< 10; i
++) {
2755 if (skb_queue_len(&dev
->rxq
) >= dev
->rx_qlen
)
2757 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
2759 if (rx_submit(dev
, urb
, GFP_ATOMIC
) == -ENOLINK
)
2763 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
)
2764 tasklet_schedule(&dev
->bh
);
2766 if (skb_queue_len(&dev
->txq
) < dev
->tx_qlen
)
2767 netif_wake_queue(dev
->net
);
2770 static void lan78xx_bh(unsigned long param
)
2772 struct lan78xx_net
*dev
= (struct lan78xx_net
*)param
;
2773 struct sk_buff
*skb
;
2774 struct skb_data
*entry
;
2776 while ((skb
= skb_dequeue(&dev
->done
))) {
2777 entry
= (struct skb_data
*)(skb
->cb
);
2778 switch (entry
->state
) {
2780 entry
->state
= rx_cleanup
;
2781 rx_process(dev
, skb
);
2784 usb_free_urb(entry
->urb
);
2788 usb_free_urb(entry
->urb
);
2792 netdev_dbg(dev
->net
, "skb state %d\n", entry
->state
);
2797 if (netif_device_present(dev
->net
) && netif_running(dev
->net
)) {
2798 if (!skb_queue_empty(&dev
->txq_pend
))
2801 if (!timer_pending(&dev
->delay
) &&
2802 !test_bit(EVENT_RX_HALT
, &dev
->flags
))
2807 static void lan78xx_delayedwork(struct work_struct
*work
)
2810 struct lan78xx_net
*dev
;
2812 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
2814 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
2815 unlink_urbs(dev
, &dev
->txq
);
2816 status
= usb_autopm_get_interface(dev
->intf
);
2819 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
2820 usb_autopm_put_interface(dev
->intf
);
2823 status
!= -ESHUTDOWN
) {
2824 if (netif_msg_tx_err(dev
))
2826 netdev_err(dev
->net
,
2827 "can't clear tx halt, status %d\n",
2830 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
2831 if (status
!= -ESHUTDOWN
)
2832 netif_wake_queue(dev
->net
);
2835 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
2836 unlink_urbs(dev
, &dev
->rxq
);
2837 status
= usb_autopm_get_interface(dev
->intf
);
2840 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
2841 usb_autopm_put_interface(dev
->intf
);
2844 status
!= -ESHUTDOWN
) {
2845 if (netif_msg_rx_err(dev
))
2847 netdev_err(dev
->net
,
2848 "can't clear rx halt, status %d\n",
2851 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
2852 tasklet_schedule(&dev
->bh
);
2856 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
2859 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
2860 status
= usb_autopm_get_interface(dev
->intf
);
2863 if (lan78xx_link_reset(dev
) < 0) {
2864 usb_autopm_put_interface(dev
->intf
);
2866 netdev_info(dev
->net
, "link reset failed (%d)\n",
2869 usb_autopm_put_interface(dev
->intf
);
2874 static void intr_complete(struct urb
*urb
)
2876 struct lan78xx_net
*dev
= urb
->context
;
2877 int status
= urb
->status
;
2882 lan78xx_status(dev
, urb
);
2885 /* software-driven interface shutdown */
2886 case -ENOENT
: /* urb killed */
2887 case -ESHUTDOWN
: /* hardware gone */
2888 netif_dbg(dev
, ifdown
, dev
->net
,
2889 "intr shutdown, code %d\n", status
);
2892 /* NOTE: not throttling like RX/TX, since this endpoint
2893 * already polls infrequently
2896 netdev_dbg(dev
->net
, "intr status %d\n", status
);
2900 if (!netif_running(dev
->net
))
2903 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
2904 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
2906 netif_err(dev
, timer
, dev
->net
,
2907 "intr resubmit --> %d\n", status
);
2910 static void lan78xx_disconnect(struct usb_interface
*intf
)
2912 struct lan78xx_net
*dev
;
2913 struct usb_device
*udev
;
2914 struct net_device
*net
;
2916 dev
= usb_get_intfdata(intf
);
2917 usb_set_intfdata(intf
, NULL
);
2921 udev
= interface_to_usbdev(intf
);
2924 unregister_netdev(net
);
2926 cancel_delayed_work_sync(&dev
->wq
);
2928 usb_scuttle_anchored_urbs(&dev
->deferred
);
2930 lan78xx_unbind(dev
, intf
);
2932 usb_kill_urb(dev
->urb_intr
);
2933 usb_free_urb(dev
->urb_intr
);
2939 void lan78xx_tx_timeout(struct net_device
*net
)
2941 struct lan78xx_net
*dev
= netdev_priv(net
);
2943 unlink_urbs(dev
, &dev
->txq
);
2944 tasklet_schedule(&dev
->bh
);
2947 static const struct net_device_ops lan78xx_netdev_ops
= {
2948 .ndo_open
= lan78xx_open
,
2949 .ndo_stop
= lan78xx_stop
,
2950 .ndo_start_xmit
= lan78xx_start_xmit
,
2951 .ndo_tx_timeout
= lan78xx_tx_timeout
,
2952 .ndo_change_mtu
= lan78xx_change_mtu
,
2953 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
2954 .ndo_validate_addr
= eth_validate_addr
,
2955 .ndo_do_ioctl
= lan78xx_ioctl
,
2956 .ndo_set_rx_mode
= lan78xx_set_multicast
,
2957 .ndo_set_features
= lan78xx_set_features
,
2958 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
2959 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
2962 static int lan78xx_probe(struct usb_interface
*intf
,
2963 const struct usb_device_id
*id
)
2965 struct lan78xx_net
*dev
;
2966 struct net_device
*netdev
;
2967 struct usb_device
*udev
;
2973 udev
= interface_to_usbdev(intf
);
2974 udev
= usb_get_dev(udev
);
2977 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
2979 dev_err(&intf
->dev
, "Error: OOM\n");
2983 /* netdev_printk() needs this */
2984 SET_NETDEV_DEV(netdev
, &intf
->dev
);
2986 dev
= netdev_priv(netdev
);
2990 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
2991 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
2993 skb_queue_head_init(&dev
->rxq
);
2994 skb_queue_head_init(&dev
->txq
);
2995 skb_queue_head_init(&dev
->done
);
2996 skb_queue_head_init(&dev
->rxq_pause
);
2997 skb_queue_head_init(&dev
->txq_pend
);
2998 mutex_init(&dev
->phy_mutex
);
3000 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
3001 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
3002 init_usb_anchor(&dev
->deferred
);
3004 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3005 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3006 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3008 ret
= lan78xx_bind(dev
, intf
);
3011 strcpy(netdev
->name
, "eth%d");
3013 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3014 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
3016 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3017 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3018 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3020 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3021 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3023 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3024 dev
->ep_intr
->desc
.bEndpointAddress
&
3025 USB_ENDPOINT_NUMBER_MASK
);
3026 period
= dev
->ep_intr
->desc
.bInterval
;
3028 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3029 buf
= kmalloc(maxp
, GFP_KERNEL
);
3031 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3032 if (!dev
->urb_intr
) {
3036 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3037 dev
->pipe_intr
, buf
, maxp
,
3038 intr_complete
, dev
, period
);
3042 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3044 /* driver requires remote-wakeup capability during autosuspend. */
3045 intf
->needs_remote_wakeup
= 1;
3047 ret
= register_netdev(netdev
);
3049 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3053 usb_set_intfdata(intf
, dev
);
3055 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3057 /* Default delay of 2sec has more overhead than advantage.
3058 * Set to 10sec as default.
3060 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3061 DEFAULT_AUTOSUSPEND_DELAY
);
3066 lan78xx_unbind(dev
, intf
);
3068 free_netdev(netdev
);
3075 static u16
lan78xx_wakeframe_crc16(const u8
*buf
, int len
)
3077 const u16 crc16poly
= 0x8005;
3083 for (i
= 0; i
< len
; i
++) {
3085 for (bit
= 0; bit
< 8; bit
++) {
3089 if (msb
^ (u16
)(data
& 1)) {
3091 crc
|= (u16
)0x0001U
;
3100 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
3108 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3109 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3110 const u8 arp_type
[2] = { 0x08, 0x06 };
3112 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3113 buf
&= ~MAC_TX_TXEN_
;
3114 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3115 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3116 buf
&= ~MAC_RX_RXEN_
;
3117 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3119 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3120 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3121 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3126 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3127 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3128 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
3130 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3131 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
3134 if (wol
& WAKE_PHY
) {
3135 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3137 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3138 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3139 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3141 if (wol
& WAKE_MAGIC
) {
3142 temp_wucsr
|= WUCSR_MPEN_
;
3144 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3145 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3146 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
3148 if (wol
& WAKE_BCAST
) {
3149 temp_wucsr
|= WUCSR_BCST_EN_
;
3151 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3152 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3153 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3155 if (wol
& WAKE_MCAST
) {
3156 temp_wucsr
|= WUCSR_WAKE_EN_
;
3158 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3159 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3160 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3162 WUF_CFGX_TYPE_MCAST_
|
3163 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3164 (crc
& WUF_CFGX_CRC16_MASK_
));
3166 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3167 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3168 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3169 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3172 /* for IPv6 Multicast */
3173 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3174 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3176 WUF_CFGX_TYPE_MCAST_
|
3177 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3178 (crc
& WUF_CFGX_CRC16_MASK_
));
3180 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3181 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3182 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3183 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3186 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3187 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3188 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3190 if (wol
& WAKE_UCAST
) {
3191 temp_wucsr
|= WUCSR_PFDA_EN_
;
3193 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3194 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3195 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3197 if (wol
& WAKE_ARP
) {
3198 temp_wucsr
|= WUCSR_WAKE_EN_
;
3200 /* set WUF_CFG & WUF_MASK
3201 * for packettype (offset 12,13) = ARP (0x0806)
3203 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3204 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3206 WUF_CFGX_TYPE_ALL_
|
3207 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3208 (crc
& WUF_CFGX_CRC16_MASK_
));
3210 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3211 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3212 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3213 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3216 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3217 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3218 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3221 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3223 /* when multiple WOL bits are set */
3224 if (hweight_long((unsigned long)wol
) > 1) {
3225 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3226 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3227 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3229 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
3232 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3233 buf
|= PMT_CTL_WUPS_MASK_
;
3234 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3236 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3237 buf
|= MAC_RX_RXEN_
;
3238 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3243 int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
3245 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3246 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3251 event
= message
.event
;
3253 if (!dev
->suspend_count
++) {
3254 spin_lock_irq(&dev
->txq
.lock
);
3255 /* don't autosuspend while transmitting */
3256 if ((skb_queue_len(&dev
->txq
) ||
3257 skb_queue_len(&dev
->txq_pend
)) &&
3258 PMSG_IS_AUTO(message
)) {
3259 spin_unlock_irq(&dev
->txq
.lock
);
3263 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3264 spin_unlock_irq(&dev
->txq
.lock
);
3268 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3269 buf
&= ~MAC_TX_TXEN_
;
3270 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3271 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3272 buf
&= ~MAC_RX_RXEN_
;
3273 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3275 /* empty out the rx and queues */
3276 netif_device_detach(dev
->net
);
3277 lan78xx_terminate_urbs(dev
);
3278 usb_kill_urb(dev
->urb_intr
);
3281 netif_device_attach(dev
->net
);
3284 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3285 if (PMSG_IS_AUTO(message
)) {
3286 /* auto suspend (selective suspend) */
3287 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3288 buf
&= ~MAC_TX_TXEN_
;
3289 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3290 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3291 buf
&= ~MAC_RX_RXEN_
;
3292 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3294 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3295 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3296 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3298 /* set goodframe wakeup */
3299 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
3301 buf
|= WUCSR_RFE_WAKE_EN_
;
3302 buf
|= WUCSR_STORE_WAKE_
;
3304 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
3306 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3308 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3309 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
3311 buf
|= PMT_CTL_PHY_WAKE_EN_
;
3312 buf
|= PMT_CTL_WOL_EN_
;
3313 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
3314 buf
|= PMT_CTL_SUS_MODE_3_
;
3316 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3318 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3320 buf
|= PMT_CTL_WUPS_MASK_
;
3322 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3324 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3325 buf
|= MAC_RX_RXEN_
;
3326 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3328 lan78xx_set_suspend(dev
, pdata
->wol
);
3337 int lan78xx_resume(struct usb_interface
*intf
)
3339 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3340 struct sk_buff
*skb
;
3345 if (!--dev
->suspend_count
) {
3346 /* resume interrupt URBs */
3347 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
3348 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
3350 spin_lock_irq(&dev
->txq
.lock
);
3351 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
3352 skb
= (struct sk_buff
*)res
->context
;
3353 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
3355 dev_kfree_skb_any(skb
);
3357 usb_autopm_put_interface_async(dev
->intf
);
3359 dev
->net
->trans_start
= jiffies
;
3360 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3364 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3365 spin_unlock_irq(&dev
->txq
.lock
);
3367 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
3368 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
3369 netif_start_queue(dev
->net
);
3370 tasklet_schedule(&dev
->bh
);
3374 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3375 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3376 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3378 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
3380 WUCSR2_IPV6_TCPSYN_RCD_
|
3381 WUCSR2_IPV4_TCPSYN_RCD_
);
3383 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
3384 WUCSR_EEE_RX_WAKE_
|
3386 WUCSR_RFE_WAKE_FR_
|
3391 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3392 buf
|= MAC_TX_TXEN_
;
3393 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
/* Resume after a bus reset: fully re-initialize the chip and PHY,
 * then run the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3409 static const struct usb_device_id products
[] = {
3411 /* LAN7800 USB Gigabit Ethernet Device */
3412 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
3415 /* LAN7850 USB Gigabit Ethernet Device */
3416 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
3420 MODULE_DEVICE_TABLE(usb
, products
);
3422 static struct usb_driver lan78xx_driver
= {
3423 .name
= DRIVER_NAME
,
3424 .id_table
= products
,
3425 .probe
= lan78xx_probe
,
3426 .disconnect
= lan78xx_disconnect
,
3427 .suspend
= lan78xx_suspend
,
3428 .resume
= lan78xx_resume
,
3429 .reset_resume
= lan78xx_reset_resume
,
3430 .supports_autosuspend
= 1,
3431 .disable_hub_initiated_lpm
= 1,
3434 module_usb_driver(lan78xx_driver
);
3436 MODULE_AUTHOR(DRIVER_AUTHOR
);
3437 MODULE_DESCRIPTION(DRIVER_DESC
);
3438 MODULE_LICENSE("GPL");