lan78xx: replace devid to chipid & chiprev
[deliverable/linux.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
bdfba55e 33#include <linux/microchipphy.h>
55d7de9d
WH
34#include "lan78xx.h"
35
36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38#define DRIVER_NAME "lan78xx"
e4953910 39#define DRIVER_VERSION "1.0.2"
55d7de9d
WH
40
41#define TX_TIMEOUT_JIFFIES (5 * HZ)
42#define THROTTLE_JIFFIES (HZ / 8)
43#define UNLINK_TIMEOUT_MS 3
44
45#define RX_MAX_QUEUE_MEMORY (60 * 1518)
46
47#define SS_USB_PKT_SIZE (1024)
48#define HS_USB_PKT_SIZE (512)
49#define FS_USB_PKT_SIZE (64)
50
51#define MAX_RX_FIFO_SIZE (12 * 1024)
52#define MAX_TX_FIFO_SIZE (12 * 1024)
53#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54#define DEFAULT_BULK_IN_DELAY (0x0800)
55#define MAX_SINGLE_PACKET_SIZE (9000)
56#define DEFAULT_TX_CSUM_ENABLE (true)
57#define DEFAULT_RX_CSUM_ENABLE (true)
58#define DEFAULT_TSO_CSUM_ENABLE (true)
59#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
60#define TX_OVERHEAD (8)
61#define RXW_PADDING 2
62
63#define LAN78XX_USB_VENDOR_ID (0x0424)
64#define LAN7800_USB_PRODUCT_ID (0x7800)
65#define LAN7850_USB_PRODUCT_ID (0x7850)
66#define LAN78XX_EEPROM_MAGIC (0x78A5)
67#define LAN78XX_OTP_MAGIC (0x78F3)
68
69#define MII_READ 1
70#define MII_WRITE 0
71
72#define EEPROM_INDICATOR (0xA5)
73#define EEPROM_MAC_OFFSET (0x01)
74#define MAX_EEPROM_SIZE 512
75#define OTP_INDICATOR_1 (0xF3)
76#define OTP_INDICATOR_2 (0xF7)
77
78#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
81
82/* USB related defines */
83#define BULK_IN_PIPE 1
84#define BULK_OUT_PIPE 2
85
86/* default autosuspend delay (mSec)*/
87#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88
/* ethtool statistics names; the order must match struct lan78xx_statstage,
 * whose fields are copied u32-by-u32 into the ethtool data array.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS.
 * Field order must match lan78xx_gstrings[]; lan78xx_read_stats() treats
 * this struct as a flat array of little-endian u32 counters.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188
struct lan78xx_net;

/* Driver-private state hung off lan78xx_net->data[0]; holds the receive
 * filtering configuration and the deferred-work items that push it to
 * the chip from a sleepable context.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* enabled WAKE_* wake-up sources */
};
203
/* Lifecycle state of an skb queued on the rxq/txq/done lists. */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb's transfer */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
};
220
/* Context for an asynchronous vendor control request. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
225
226#define EVENT_TX_HALT 0
227#define EVENT_RX_HALT 1
228#define EVENT_RX_MEMORY 2
229#define EVENT_STS_SPLIT 3
230#define EVENT_LINK_RESET 4
231#define EVENT_RX_PAUSED 5
232#define EVENT_DEV_WAKING 6
233#define EVENT_DEV_ASLEEP 7
234#define EVENT_DEV_OPEN 8
235
/* Per-device state for one LAN78xx USB adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;			/* target depth of the rx queue */
	int tx_qlen;			/* target depth of the tx queue */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;	/* completed transfers awaiting bh */
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;	/* URBs deferred while suspended */

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;

	unsigned long data[5];		/* data[0] is struct lan78xx_priv * */

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;			/* from ID_REV, e.g. ID_REV_CHIP_ID_7800_ */
	u32 chiprev;
	struct mii_bus *mdiobus;
};
285
/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1: use the netif default level */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
290
291static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
292{
293 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
294 int ret;
295
55d7de9d
WH
296 if (!buf)
297 return -ENOMEM;
298
299 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
300 USB_VENDOR_REQUEST_READ_REGISTER,
301 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
302 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
303 if (likely(ret >= 0)) {
304 le32_to_cpus(buf);
305 *data = *buf;
306 } else {
307 netdev_warn(dev->net,
308 "Failed to read register index 0x%08x. ret = %d",
309 index, ret);
310 }
311
312 kfree(buf);
313
314 return ret;
315}
316
317static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
318{
319 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
320 int ret;
321
55d7de9d
WH
322 if (!buf)
323 return -ENOMEM;
324
325 *buf = data;
326 cpu_to_le32s(buf);
327
328 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
329 USB_VENDOR_REQUEST_WRITE_REGISTER,
330 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
331 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
332 if (unlikely(ret < 0)) {
333 netdev_warn(dev->net,
334 "Failed to write register index 0x%08x. ret = %d",
335 index, ret);
336 }
337
338 kfree(buf);
339
340 return ret;
341}
342
343static int lan78xx_read_stats(struct lan78xx_net *dev,
344 struct lan78xx_statstage *data)
345{
346 int ret = 0;
347 int i;
348 struct lan78xx_statstage *stats;
349 u32 *src;
350 u32 *dst;
351
55d7de9d
WH
352 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
353 if (!stats)
354 return -ENOMEM;
355
356 ret = usb_control_msg(dev->udev,
357 usb_rcvctrlpipe(dev->udev, 0),
358 USB_VENDOR_REQUEST_GET_STATS,
359 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
360 0,
361 0,
362 (void *)stats,
363 sizeof(*stats),
364 USB_CTRL_SET_TIMEOUT);
365 if (likely(ret >= 0)) {
366 src = (u32 *)stats;
367 dst = (u32 *)data;
368 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
369 le32_to_cpus(&src[i]);
370 dst[i] = src[i];
371 }
372 } else {
373 netdev_warn(dev->net,
374 "Failed to read stat ret = 0x%x", ret);
375 }
376
377 kfree(stats);
378
379 return ret;
380}
381
382/* Loop until the read is completed with timeout called with phy_mutex held */
383static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
384{
385 unsigned long start_time = jiffies;
386 u32 val;
387 int ret;
388
389 do {
390 ret = lan78xx_read_reg(dev, MII_ACC, &val);
391 if (unlikely(ret < 0))
392 return -EIO;
393
394 if (!(val & MII_ACC_MII_BUSY_))
395 return 0;
396 } while (!time_after(jiffies, start_time + HZ));
397
398 return -EIO;
399}
400
401static inline u32 mii_access(int id, int index, int read)
402{
403 u32 ret;
404
405 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
406 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
407 if (read)
408 ret |= MII_ACC_MII_READ_;
409 else
410 ret |= MII_ACC_MII_WRITE_;
411 ret |= MII_ACC_MII_BUSY_;
412
413 return ret;
414}
415
55d7de9d
WH
416static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
417{
418 unsigned long start_time = jiffies;
419 u32 val;
420 int ret;
421
422 do {
423 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
424 if (unlikely(ret < 0))
425 return -EIO;
426
427 if (!(val & E2P_CMD_EPC_BUSY_) ||
428 (val & E2P_CMD_EPC_TIMEOUT_))
429 break;
430 usleep_range(40, 100);
431 } while (!time_after(jiffies, start_time + HZ));
432
433 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
434 netdev_warn(dev->net, "EEPROM read operation timeout");
435 return -EIO;
436 }
437
438 return 0;
439}
440
441static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
442{
443 unsigned long start_time = jiffies;
444 u32 val;
445 int ret;
446
447 do {
448 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
449 if (unlikely(ret < 0))
450 return -EIO;
451
452 if (!(val & E2P_CMD_EPC_BUSY_))
453 return 0;
454
455 usleep_range(40, 100);
456 } while (!time_after(jiffies, start_time + HZ));
457
458 netdev_warn(dev->net, "EEPROM is busy");
459 return -EIO;
460}
461
462static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
463 u32 length, u8 *data)
464{
465 u32 val;
a0db7d10 466 u32 saved;
55d7de9d 467 int i, ret;
a0db7d10
WH
468 int retval;
469
470 /* depends on chip, some EEPROM pins are muxed with LED function.
471 * disable & restore LED function to access EEPROM.
472 */
473 ret = lan78xx_read_reg(dev, HW_CFG, &val);
474 saved = val;
87177ba6 475 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
476 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
477 ret = lan78xx_write_reg(dev, HW_CFG, val);
478 }
55d7de9d 479
a0db7d10
WH
480 retval = lan78xx_eeprom_confirm_not_busy(dev);
481 if (retval)
482 return retval;
55d7de9d
WH
483
484 for (i = 0; i < length; i++) {
485 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
486 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
487 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
488 if (unlikely(ret < 0)) {
489 retval = -EIO;
490 goto exit;
491 }
55d7de9d 492
a0db7d10
WH
493 retval = lan78xx_wait_eeprom(dev);
494 if (retval < 0)
495 goto exit;
55d7de9d
WH
496
497 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
498 if (unlikely(ret < 0)) {
499 retval = -EIO;
500 goto exit;
501 }
55d7de9d
WH
502
503 data[i] = val & 0xFF;
504 offset++;
505 }
506
a0db7d10
WH
507 retval = 0;
508exit:
87177ba6 509 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
510 ret = lan78xx_write_reg(dev, HW_CFG, saved);
511
512 return retval;
55d7de9d
WH
513}
514
515static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
516 u32 length, u8 *data)
517{
518 u8 sig;
519 int ret;
520
521 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
522 if ((ret == 0) && (sig == EEPROM_INDICATOR))
523 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
524 else
525 ret = -EINVAL;
526
527 return ret;
528}
529
/* Write @length bytes from @data to the EEPROM starting at @offset.
 * Issues a write-enable (EWEN) once, then one WRITE command per byte,
 * waiting for completion after each. On LAN7800 the EEPROM pins are
 * muxed with the LED function; LEDs are disabled on entry and the saved
 * HW_CFG is restored at "exit" on every path.
 * Returns 0 on success, negative errno on failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved on entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
596
/* Read @length bytes from the OTP array starting at @offset.
 * Powers up the OTP block if needed (clears OTP_PWR_DN and waits), then
 * issues an address + READ + GO sequence per byte, polling OTP_STATUS
 * for completion with a one-second deadline.
 * NOTE(review): intermediate register read/write return codes are not
 * checked here (only the polling deadlines are) — matches the style of
 * the OTP write path below.
 * Returns 0 on success, -EIO on a polling timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* program the 16-bit OTP address across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
650
9fb6066d
WH
/* Program @length bytes into the OTP array starting at @offset.
 * Powers up the OTP block if needed, selects BYTE program mode, then
 * issues an address + data + program-verify + GO sequence per byte,
 * polling OTP_STATUS with a one-second deadline each time.
 * Returns 0 on success, -EIO on a polling timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* program the 16-bit OTP address across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
703
55d7de9d
WH
704static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
705 u32 length, u8 *data)
706{
707 u8 sig;
708 int ret;
709
710 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
711
712 if (ret == 0) {
713 if (sig == OTP_INDICATOR_1)
714 offset = offset;
715 else if (sig == OTP_INDICATOR_2)
716 offset += 0x100;
717 else
718 ret = -EINVAL;
719 ret = lan78xx_read_raw_otp(dev, offset, length, data);
720 }
721
722 return ret;
723}
724
725static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
726{
727 int i, ret;
728
729 for (i = 0; i < 100; i++) {
730 u32 dp_sel;
731
732 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
733 if (unlikely(ret < 0))
734 return -EIO;
735
736 if (dp_sel & DP_SEL_DPRDY_)
737 return 0;
738
739 usleep_range(40, 100);
740 }
741
742 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
743
744 return -EIO;
745}
746
/* Write @length words from @buf into the internal RAM selected by
 * @ram_select, starting at dataport address @addr. Serialized by
 * pdata->dataport_mutex; each word write is confirmed via the DPRDY poll.
 * NOTE(review): returns 0 (success) without doing any work when
 * usb_autopm_get_interface() fails — callers cannot distinguish this
 * from a completed write; confirm this is intentional.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
787
788static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
789 int index, u8 addr[ETH_ALEN])
790{
791 u32 temp;
792
793 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
794 temp = addr[3];
795 temp = addr[2] | (temp << 8);
796 temp = addr[1] | (temp << 8);
797 temp = addr[0] | (temp << 8);
798 pdata->pfilter_table[index][1] = temp;
799 temp = addr[5];
800 temp = addr[4] | (temp << 8);
801 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
802 pdata->pfilter_table[index][0] = temp;
803 }
804}
805
806/* returns hash bit number for given MAC address */
807static inline u32 lan78xx_hash(char addr[ETH_ALEN])
808{
809 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
810}
811
/* Work handler: push the shadow multicast state to the chip from a
 * sleepable context (register access sleeps, so it cannot run under
 * the rfe_ctl spinlock where the shadow is built).
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* hash table sits after the VLAN table in the VLAN_DA RAM, hence
	 * DP_SEL_VHF_VLAN_LEN as the start address
	 */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds the device's own address and is left untouched;
	 * MAF_HI is cleared first so the slot is never valid with a
	 * half-written address
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
836
837static void lan78xx_set_multicast(struct net_device *netdev)
838{
839 struct lan78xx_net *dev = netdev_priv(netdev);
840 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
841 unsigned long flags;
842 int i;
843
844 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
845
846 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
847 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
848
849 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
850 pdata->mchash_table[i] = 0;
851 /* pfilter_table[0] has own HW address */
852 for (i = 1; i < NUM_OF_MAF; i++) {
853 pdata->pfilter_table[i][0] =
854 pdata->pfilter_table[i][1] = 0;
855 }
856
857 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
858
859 if (dev->net->flags & IFF_PROMISC) {
860 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
861 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
862 } else {
863 if (dev->net->flags & IFF_ALLMULTI) {
864 netif_dbg(dev, drv, dev->net,
865 "receive all multicast enabled");
866 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
867 }
868 }
869
870 if (netdev_mc_count(dev->net)) {
871 struct netdev_hw_addr *ha;
872 int i;
873
874 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
875
876 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
877
878 i = 1;
879 netdev_for_each_mc_addr(ha, netdev) {
880 /* set first 32 into Perfect Filter */
881 if (i < 33) {
882 lan78xx_set_addr_filter(pdata, i, ha->addr);
883 } else {
884 u32 bitnum = lan78xx_hash(ha->addr);
885
886 pdata->mchash_table[bitnum / 32] |=
887 (1 << (bitnum % 32));
888 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
889 }
890 i++;
891 }
892 }
893
894 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
895
896 /* defer register writes to a sleepable context */
897 schedule_work(&pdata->set_multicast);
898}
899
900static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
901 u16 lcladv, u16 rmtadv)
902{
903 u32 flow = 0, fct_flow = 0;
904 int ret;
905
906 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
907
908 if (cap & FLOW_CTRL_TX)
909 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
910
911 if (cap & FLOW_CTRL_RX)
912 flow |= FLOW_CR_RX_FCEN_;
913
914 if (dev->udev->speed == USB_SPEED_SUPER)
915 fct_flow = 0x817;
916 else if (dev->udev->speed == USB_SPEED_HIGH)
917 fct_flow = 0x211;
918
919 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
920 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
921 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
922
923 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
924
925 /* threshold value should be set before enabling flow */
926 ret = lan78xx_write_reg(dev, FLOW, flow);
927
928 return 0;
929}
930
/* Handle a PHY interrupt: acknowledge it, re-read link state, and bring
 * the MAC in line — reset the MAC on link loss, or configure USB power
 * states and flow control on link up. Runs from the deferred-work path
 * (EVENT_LINK_RESET). Returns 0/positive on success, negative errno on
 * register access failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);
	}

	return ret;
}
1007
1008/* some work can't be done in tasklets, so we use keventd
1009 *
1010 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1011 * but tasklet_schedule() doesn't. hope the failure is rare.
1012 */
1013void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1014{
1015 set_bit(work, &dev->flags);
1016 if (!schedule_delayed_work(&dev->wq, 0))
1017 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1018}
1019
1020static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1021{
1022 u32 intdata;
1023
1024 if (urb->actual_length != 4) {
1025 netdev_warn(dev->net,
1026 "unexpected urb length %d", urb->actual_length);
1027 return;
1028 }
1029
1030 memcpy(&intdata, urb->transfer_buffer, 4);
1031 le32_to_cpus(&intdata);
1032
1033 if (intdata & INT_ENP_PHY_INT) {
1034 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1035 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1036 } else
1037 netdev_warn(dev->net,
1038 "unexpected interrupt: 0x%08x\n", intdata);
1039}
1040
/* ethtool get_eeprom_len: always report the full EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1045
/* ethtool get_eeprom: raw read of the requested range, tagged with the
 * driver's EEPROM magic so set_eeprom can validate round-trips.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1055
1056static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1057 struct ethtool_eeprom *ee, u8 *data)
1058{
1059 struct lan78xx_net *dev = netdev_priv(netdev);
1060
1061 /* Allow entire eeprom update only */
1062 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1063 (ee->offset == 0) &&
1064 (ee->len == 512) &&
1065 (data[0] == EEPROM_INDICATOR))
1066 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1067 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1068 (ee->offset == 0) &&
1069 (ee->len == 512) &&
1070 (data[0] == OTP_INDICATOR_1))
9fb6066d 1071 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d
WH
1072
1073 return -EINVAL;
1074}
1075
1076static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1077 u8 *data)
1078{
1079 if (stringset == ETH_SS_STATS)
1080 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1081}
1082
1083static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1084{
1085 if (sset == ETH_SS_STATS)
1086 return ARRAY_SIZE(lan78xx_gstrings);
1087 else
1088 return -EOPNOTSUPP;
1089}
1090
1091static void lan78xx_get_stats(struct net_device *netdev,
1092 struct ethtool_stats *stats, u64 *data)
1093{
1094 struct lan78xx_net *dev = netdev_priv(netdev);
1095 struct lan78xx_statstage lan78xx_stat;
1096 u32 *p;
1097 int i;
1098
1099 if (usb_autopm_get_interface(dev->intf) < 0)
1100 return;
1101
1102 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1103 p = (u32 *)&lan78xx_stat;
1104 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1105 data[i] = p[i];
1106 }
1107
1108 usb_autopm_put_interface(dev->intf);
1109}
1110
/* ethtool get_wol: report Wake-on-LAN capability (from USB_CFG0's remote
 * wakeup bit) and the currently enabled wake sources from pdata->wol.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* can't talk to the device: report no WoL capability */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1138
1139static int lan78xx_set_wol(struct net_device *netdev,
1140 struct ethtool_wolinfo *wol)
1141{
1142 struct lan78xx_net *dev = netdev_priv(netdev);
1143 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1144 int ret;
1145
1146 ret = usb_autopm_get_interface(dev->intf);
1147 if (ret < 0)
1148 return ret;
1149
1150 pdata->wol = 0;
1151 if (wol->wolopts & WAKE_UCAST)
1152 pdata->wol |= WAKE_UCAST;
1153 if (wol->wolopts & WAKE_MCAST)
1154 pdata->wol |= WAKE_MCAST;
1155 if (wol->wolopts & WAKE_BCAST)
1156 pdata->wol |= WAKE_BCAST;
1157 if (wol->wolopts & WAKE_MAGIC)
1158 pdata->wol |= WAKE_MAGIC;
1159 if (wol->wolopts & WAKE_PHY)
1160 pdata->wol |= WAKE_PHY;
1161 if (wol->wolopts & WAKE_ARP)
1162 pdata->wol |= WAKE_ARP;
1163
1164 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1165
ce85e13a
WH
1166 phy_ethtool_set_wol(netdev->phydev, wol);
1167
55d7de9d
WH
1168 usb_autopm_put_interface(dev->intf);
1169
1170 return ret;
1171}
1172
/* ethtool get_eee: combine the PHY's EEE advertisement state with the
 * MAC's EEE enable bit (MAC_CR_EEE_EN_) and the TX LPI request delay.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active when both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1210
1211static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1212{
1213 struct lan78xx_net *dev = netdev_priv(net);
1214 int ret;
1215 u32 buf;
1216
1217 ret = usb_autopm_get_interface(dev->intf);
1218 if (ret < 0)
1219 return ret;
1220
1221 if (edata->eee_enabled) {
1222 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1223 buf |= MAC_CR_EEE_EN_;
1224 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1225
ce85e13a
WH
1226 phy_ethtool_set_eee(net->phydev, edata);
1227
1228 buf = (u32)edata->tx_lpi_timer;
1229 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1230 } else {
1231 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1232 buf &= ~MAC_CR_EEE_EN_;
1233 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1234 }
1235
1236 usb_autopm_put_interface(dev->intf);
1237
1238 return 0;
1239}
1240
1241static u32 lan78xx_get_link(struct net_device *net)
1242{
ce85e13a 1243 phy_read_status(net->phydev);
55d7de9d 1244
ce85e13a 1245 return net->phydev->link;
55d7de9d
WH
1246}
1247
1248int lan78xx_nway_reset(struct net_device *net)
1249{
ce85e13a 1250 return phy_start_aneg(net->phydev);
55d7de9d
WH
1251}
1252
1253static void lan78xx_get_drvinfo(struct net_device *net,
1254 struct ethtool_drvinfo *info)
1255{
1256 struct lan78xx_net *dev = netdev_priv(net);
1257
1258 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1259 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1260 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1261}
1262
1263static u32 lan78xx_get_msglevel(struct net_device *net)
1264{
1265 struct lan78xx_net *dev = netdev_priv(net);
1266
1267 return dev->msg_enable;
1268}
1269
1270static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1271{
1272 struct lan78xx_net *dev = netdev_priv(net);
1273
1274 dev->msg_enable = level;
1275}
1276
758c5c11
WH
1277static int lan78xx_get_mdix_status(struct net_device *net)
1278{
1279 struct phy_device *phydev = net->phydev;
1280 int buf;
1281
1282 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1283 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1284 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1285
1286 return buf;
1287}
1288
1289static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1290{
1291 struct lan78xx_net *dev = netdev_priv(net);
1292 struct phy_device *phydev = net->phydev;
1293 int buf;
1294
1295 if (mdix_ctrl == ETH_TP_MDI) {
1296 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1297 LAN88XX_EXT_PAGE_SPACE_1);
1298 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1299 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1300 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1301 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1302 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1303 LAN88XX_EXT_PAGE_SPACE_0);
1304 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1305 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1306 LAN88XX_EXT_PAGE_SPACE_1);
1307 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1308 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1309 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1310 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1311 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1312 LAN88XX_EXT_PAGE_SPACE_0);
1313 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1314 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1315 LAN88XX_EXT_PAGE_SPACE_1);
1316 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1317 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1318 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1319 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1320 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1321 LAN88XX_EXT_PAGE_SPACE_0);
1322 }
1323 dev->mdix_ctrl = mdix_ctrl;
1324}
1325
55d7de9d
WH
1326static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1327{
1328 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1329 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1330 int ret;
1331 int buf;
1332
55d7de9d
WH
1333 ret = usb_autopm_get_interface(dev->intf);
1334 if (ret < 0)
1335 return ret;
1336
ce85e13a 1337 ret = phy_ethtool_gset(phydev, cmd);
55d7de9d 1338
758c5c11 1339 buf = lan78xx_get_mdix_status(net);
55d7de9d 1340
bdfba55e
WH
1341 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1342 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
55d7de9d
WH
1343 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1344 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
bdfba55e 1345 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
55d7de9d
WH
1346 cmd->eth_tp_mdix = ETH_TP_MDI;
1347 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
bdfba55e 1348 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
55d7de9d
WH
1349 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1350 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1351 }
1352
1353 usb_autopm_put_interface(dev->intf);
1354
1355 return ret;
1356}
1357
1358static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1359{
1360 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1361 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1362 int ret = 0;
1363 int temp;
1364
55d7de9d
WH
1365 ret = usb_autopm_get_interface(dev->intf);
1366 if (ret < 0)
1367 return ret;
1368
1369 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
758c5c11 1370 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
55d7de9d
WH
1371 }
1372
1373 /* change speed & duplex */
ce85e13a 1374 ret = phy_ethtool_sset(phydev, cmd);
55d7de9d
WH
1375
1376 if (!cmd->autoneg) {
1377 /* force link down */
ce85e13a
WH
1378 temp = phy_read(phydev, MII_BMCR);
1379 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 1380 mdelay(1);
ce85e13a 1381 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
1382 }
1383
1384 usb_autopm_put_interface(dev->intf);
1385
1386 return ret;
1387}
1388
1389static const struct ethtool_ops lan78xx_ethtool_ops = {
1390 .get_link = lan78xx_get_link,
1391 .nway_reset = lan78xx_nway_reset,
1392 .get_drvinfo = lan78xx_get_drvinfo,
1393 .get_msglevel = lan78xx_get_msglevel,
1394 .set_msglevel = lan78xx_set_msglevel,
1395 .get_settings = lan78xx_get_settings,
1396 .set_settings = lan78xx_set_settings,
1397 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1398 .get_eeprom = lan78xx_ethtool_get_eeprom,
1399 .set_eeprom = lan78xx_ethtool_set_eeprom,
1400 .get_ethtool_stats = lan78xx_get_stats,
1401 .get_sset_count = lan78xx_get_sset_count,
1402 .get_strings = lan78xx_get_strings,
1403 .get_wol = lan78xx_get_wol,
1404 .set_wol = lan78xx_set_wol,
1405 .get_eee = lan78xx_get_eee,
1406 .set_eee = lan78xx_set_eee,
1407};
1408
1409static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1410{
55d7de9d
WH
1411 if (!netif_running(netdev))
1412 return -EINVAL;
1413
ce85e13a 1414 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1415}
1416
1417static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1418{
1419 u32 addr_lo, addr_hi;
1420 int ret;
1421 u8 addr[6];
1422
1423 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1424 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1425
1426 addr[0] = addr_lo & 0xFF;
1427 addr[1] = (addr_lo >> 8) & 0xFF;
1428 addr[2] = (addr_lo >> 16) & 0xFF;
1429 addr[3] = (addr_lo >> 24) & 0xFF;
1430 addr[4] = addr_hi & 0xFF;
1431 addr[5] = (addr_hi >> 8) & 0xFF;
1432
1433 if (!is_valid_ether_addr(addr)) {
1434 /* reading mac address from EEPROM or OTP */
1435 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1436 addr) == 0) ||
1437 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1438 addr) == 0)) {
1439 if (is_valid_ether_addr(addr)) {
1440 /* eeprom values are valid so use them */
1441 netif_dbg(dev, ifup, dev->net,
1442 "MAC address read from EEPROM");
1443 } else {
1444 /* generate random MAC */
1445 random_ether_addr(addr);
1446 netif_dbg(dev, ifup, dev->net,
1447 "MAC address set to random addr");
1448 }
1449
1450 addr_lo = addr[0] | (addr[1] << 8) |
1451 (addr[2] << 16) | (addr[3] << 24);
1452 addr_hi = addr[4] | (addr[5] << 8);
1453
1454 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1455 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1456 } else {
1457 /* generate random MAC */
1458 random_ether_addr(addr);
1459 netif_dbg(dev, ifup, dev->net,
1460 "MAC address set to random addr");
1461 }
1462 }
1463
1464 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1465 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1466
1467 ether_addr_copy(dev->net->dev_addr, addr);
1468}
1469
ce85e13a
WH
1470/* MDIO read and write wrappers for phylib */
1471static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1472{
1473 struct lan78xx_net *dev = bus->priv;
1474 u32 val, addr;
1475 int ret;
1476
1477 ret = usb_autopm_get_interface(dev->intf);
1478 if (ret < 0)
1479 return ret;
1480
1481 mutex_lock(&dev->phy_mutex);
1482
1483 /* confirm MII not busy */
1484 ret = lan78xx_phy_wait_not_busy(dev);
1485 if (ret < 0)
1486 goto done;
1487
1488 /* set the address, index & direction (read from PHY) */
1489 addr = mii_access(phy_id, idx, MII_READ);
1490 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1491
1492 ret = lan78xx_phy_wait_not_busy(dev);
1493 if (ret < 0)
1494 goto done;
1495
1496 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1497
1498 ret = (int)(val & 0xFFFF);
1499
1500done:
1501 mutex_unlock(&dev->phy_mutex);
1502 usb_autopm_put_interface(dev->intf);
1503 return ret;
1504}
1505
1506static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1507 u16 regval)
1508{
1509 struct lan78xx_net *dev = bus->priv;
1510 u32 val, addr;
1511 int ret;
1512
1513 ret = usb_autopm_get_interface(dev->intf);
1514 if (ret < 0)
1515 return ret;
1516
1517 mutex_lock(&dev->phy_mutex);
1518
1519 /* confirm MII not busy */
1520 ret = lan78xx_phy_wait_not_busy(dev);
1521 if (ret < 0)
1522 goto done;
1523
1524 val = (u32)regval;
1525 ret = lan78xx_write_reg(dev, MII_DATA, val);
1526
1527 /* set the address, index & direction (write to PHY) */
1528 addr = mii_access(phy_id, idx, MII_WRITE);
1529 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1530
1531 ret = lan78xx_phy_wait_not_busy(dev);
1532 if (ret < 0)
1533 goto done;
1534
1535done:
1536 mutex_unlock(&dev->phy_mutex);
1537 usb_autopm_put_interface(dev->intf);
1538 return 0;
1539}
1540
1541static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1542{
ce85e13a 1543 int ret;
ce85e13a
WH
1544
1545 dev->mdiobus = mdiobus_alloc();
1546 if (!dev->mdiobus) {
1547 netdev_err(dev->net, "can't allocate MDIO bus\n");
1548 return -ENOMEM;
1549 }
1550
1551 dev->mdiobus->priv = (void *)dev;
1552 dev->mdiobus->read = lan78xx_mdiobus_read;
1553 dev->mdiobus->write = lan78xx_mdiobus_write;
1554 dev->mdiobus->name = "lan78xx-mdiobus";
1555
1556 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1557 dev->udev->bus->busnum, dev->udev->devnum);
1558
87177ba6
WH
1559 switch (dev->chipid) {
1560 case ID_REV_CHIP_ID_7800_:
1561 case ID_REV_CHIP_ID_7850_:
ce85e13a
WH
1562 /* set to internal PHY id */
1563 dev->mdiobus->phy_mask = ~(1 << 1);
1564 break;
1565 }
1566
1567 ret = mdiobus_register(dev->mdiobus);
1568 if (ret) {
1569 netdev_err(dev->net, "can't register MDIO bus\n");
e7f4dc35 1570 goto exit1;
ce85e13a
WH
1571 }
1572
1573 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1574 return 0;
ce85e13a
WH
1575exit1:
1576 mdiobus_free(dev->mdiobus);
1577 return ret;
1578}
1579
1580static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1581{
1582 mdiobus_unregister(dev->mdiobus);
ce85e13a
WH
1583 mdiobus_free(dev->mdiobus);
1584}
1585
/* phylib adjust_link callback; link events are processed via the
 * driver's own interrupt URB instead, so nothing to do here.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1590
1591static int lan78xx_phy_init(struct lan78xx_net *dev)
1592{
ce85e13a
WH
1593 int ret;
1594 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1595
ce85e13a
WH
1596 phydev = phy_find_first(dev->mdiobus);
1597 if (!phydev) {
1598 netdev_err(dev->net, "no PHY found\n");
1599 return -EIO;
1600 }
55d7de9d 1601
e4953910
WH
1602 /* Enable PHY interrupts.
1603 * We handle our own interrupt
1604 */
1605 ret = phy_read(phydev, LAN88XX_INT_STS);
1606 ret = phy_write(phydev, LAN88XX_INT_MASK,
1607 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1608 LAN88XX_INT_MASK_LINK_CHANGE_);
1609
1610 phydev->irq = PHY_IGNORE_INTERRUPT;
1611
ce85e13a
WH
1612 ret = phy_connect_direct(dev->net, phydev,
1613 lan78xx_link_status_change,
1614 PHY_INTERFACE_MODE_GMII);
1615 if (ret) {
1616 netdev_err(dev->net, "can't attach PHY to %s\n",
1617 dev->mdiobus->id);
1618 return -EIO;
1619 }
55d7de9d
WH
1620
1621 /* set to AUTOMDIX */
758c5c11 1622 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
55d7de9d 1623
ce85e13a
WH
1624 /* MAC doesn't support 1000T Half */
1625 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1626 phydev->supported |= (SUPPORTED_10baseT_Half |
1627 SUPPORTED_10baseT_Full |
1628 SUPPORTED_100baseT_Half |
1629 SUPPORTED_100baseT_Full |
1630 SUPPORTED_1000baseT_Full |
1631 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1632 genphy_config_aneg(phydev);
1633
ce85e13a 1634 phy_start(phydev);
55d7de9d
WH
1635
1636 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1637
1638 return 0;
1639}
1640
1641static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1642{
1643 int ret = 0;
1644 u32 buf;
1645 bool rxenabled;
1646
1647 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1648
1649 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1650
1651 if (rxenabled) {
1652 buf &= ~MAC_RX_RXEN_;
1653 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1654 }
1655
1656 /* add 4 to size for FCS */
1657 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1658 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1659
1660 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1661
1662 if (rxenabled) {
1663 buf |= MAC_RX_RXEN_;
1664 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1665 }
1666
1667 return 0;
1668}
1669
1670static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1671{
1672 struct sk_buff *skb;
1673 unsigned long flags;
1674 int count = 0;
1675
1676 spin_lock_irqsave(&q->lock, flags);
1677 while (!skb_queue_empty(q)) {
1678 struct skb_data *entry;
1679 struct urb *urb;
1680 int ret;
1681
1682 skb_queue_walk(q, skb) {
1683 entry = (struct skb_data *)skb->cb;
1684 if (entry->state != unlink_start)
1685 goto found;
1686 }
1687 break;
1688found:
1689 entry->state = unlink_start;
1690 urb = entry->urb;
1691
1692 /* Get reference count of the URB to avoid it to be
1693 * freed during usb_unlink_urb, which may trigger
1694 * use-after-free problem inside usb_unlink_urb since
1695 * usb_unlink_urb is always racing with .complete
1696 * handler(include defer_bh).
1697 */
1698 usb_get_urb(urb);
1699 spin_unlock_irqrestore(&q->lock, flags);
1700 /* during some PM-driven resume scenarios,
1701 * these (async) unlinks complete immediately
1702 */
1703 ret = usb_unlink_urb(urb);
1704 if (ret != -EINPROGRESS && ret != 0)
1705 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1706 else
1707 count++;
1708 usb_put_urb(urb);
1709 spin_lock_irqsave(&q->lock, flags);
1710 }
1711 spin_unlock_irqrestore(&q->lock, flags);
1712 return count;
1713}
1714
1715static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1716{
1717 struct lan78xx_net *dev = netdev_priv(netdev);
1718 int ll_mtu = new_mtu + netdev->hard_header_len;
1719 int old_hard_mtu = dev->hard_mtu;
1720 int old_rx_urb_size = dev->rx_urb_size;
1721 int ret;
1722
1723 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1724 return -EINVAL;
1725
1726 if (new_mtu <= 0)
1727 return -EINVAL;
1728 /* no second zero-length packet read wanted after mtu-sized packets */
1729 if ((ll_mtu % dev->maxpacket) == 0)
1730 return -EDOM;
1731
1732 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1733
1734 netdev->mtu = new_mtu;
1735
1736 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1737 if (dev->rx_urb_size == old_hard_mtu) {
1738 dev->rx_urb_size = dev->hard_mtu;
1739 if (dev->rx_urb_size > old_rx_urb_size) {
1740 if (netif_running(dev->net)) {
1741 unlink_urbs(dev, &dev->rxq);
1742 tasklet_schedule(&dev->bh);
1743 }
1744 }
1745 }
1746
1747 return 0;
1748}
1749
1750int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1751{
1752 struct lan78xx_net *dev = netdev_priv(netdev);
1753 struct sockaddr *addr = p;
1754 u32 addr_lo, addr_hi;
1755 int ret;
1756
1757 if (netif_running(netdev))
1758 return -EBUSY;
1759
1760 if (!is_valid_ether_addr(addr->sa_data))
1761 return -EADDRNOTAVAIL;
1762
1763 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1764
1765 addr_lo = netdev->dev_addr[0] |
1766 netdev->dev_addr[1] << 8 |
1767 netdev->dev_addr[2] << 16 |
1768 netdev->dev_addr[3] << 24;
1769 addr_hi = netdev->dev_addr[4] |
1770 netdev->dev_addr[5] << 8;
1771
1772 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1773 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1774
1775 return 0;
1776}
1777
1778/* Enable or disable Rx checksum offload engine */
1779static int lan78xx_set_features(struct net_device *netdev,
1780 netdev_features_t features)
1781{
1782 struct lan78xx_net *dev = netdev_priv(netdev);
1783 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1784 unsigned long flags;
1785 int ret;
1786
1787 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1788
1789 if (features & NETIF_F_RXCSUM) {
1790 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1791 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1792 } else {
1793 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1794 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1795 }
1796
1797 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1798 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1799 else
1800 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1801
1802 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1803
1804 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1805
1806 return 0;
1807}
1808
1809static void lan78xx_deferred_vlan_write(struct work_struct *param)
1810{
1811 struct lan78xx_priv *pdata =
1812 container_of(param, struct lan78xx_priv, set_vlan);
1813 struct lan78xx_net *dev = pdata->dev;
1814
1815 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1816 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1817}
1818
1819static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1820 __be16 proto, u16 vid)
1821{
1822 struct lan78xx_net *dev = netdev_priv(netdev);
1823 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1824 u16 vid_bit_index;
1825 u16 vid_dword_index;
1826
1827 vid_dword_index = (vid >> 5) & 0x7F;
1828 vid_bit_index = vid & 0x1F;
1829
1830 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1831
1832 /* defer register writes to a sleepable context */
1833 schedule_work(&pdata->set_vlan);
1834
1835 return 0;
1836}
1837
1838static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1839 __be16 proto, u16 vid)
1840{
1841 struct lan78xx_net *dev = netdev_priv(netdev);
1842 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1843 u16 vid_bit_index;
1844 u16 vid_dword_index;
1845
1846 vid_dword_index = (vid >> 5) & 0x7F;
1847 vid_bit_index = vid & 0x1F;
1848
1849 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1850
1851 /* defer register writes to a sleepable context */
1852 schedule_work(&pdata->set_vlan);
1853
1854 return 0;
1855}
1856
1857static void lan78xx_init_ltm(struct lan78xx_net *dev)
1858{
1859 int ret;
1860 u32 buf;
1861 u32 regs[6] = { 0 };
1862
1863 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1864 if (buf & USB_CFG1_LTM_ENABLE_) {
1865 u8 temp[2];
1866 /* Get values from EEPROM first */
1867 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1868 if (temp[0] == 24) {
1869 ret = lan78xx_read_raw_eeprom(dev,
1870 temp[1] * 2,
1871 24,
1872 (u8 *)regs);
1873 if (ret < 0)
1874 return;
1875 }
1876 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1877 if (temp[0] == 24) {
1878 ret = lan78xx_read_raw_otp(dev,
1879 temp[1] * 2,
1880 24,
1881 (u8 *)regs);
1882 if (ret < 0)
1883 return;
1884 }
1885 }
1886 }
1887
1888 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1889 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1890 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1891 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1892 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1893 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1894}
1895
1896static int lan78xx_reset(struct lan78xx_net *dev)
1897{
1898 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1899 u32 buf;
1900 int ret = 0;
1901 unsigned long timeout;
1902
1903 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1904 buf |= HW_CFG_LRST_;
1905 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1906
1907 timeout = jiffies + HZ;
1908 do {
1909 mdelay(1);
1910 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1911 if (time_after(jiffies, timeout)) {
1912 netdev_warn(dev->net,
1913 "timeout on completion of LiteReset");
1914 return -EIO;
1915 }
1916 } while (buf & HW_CFG_LRST_);
1917
1918 lan78xx_init_mac_address(dev);
1919
ce85e13a
WH
1920 /* save DEVID for later usage */
1921 ret = lan78xx_read_reg(dev, ID_REV, &buf);
87177ba6
WH
1922 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
1923 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
ce85e13a 1924
55d7de9d
WH
1925 /* Respond to the IN token with a NAK */
1926 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1927 buf |= USB_CFG_BIR_;
1928 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1929
1930 /* Init LTM */
1931 lan78xx_init_ltm(dev);
1932
1933 dev->net->hard_header_len += TX_OVERHEAD;
1934 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1935
1936 if (dev->udev->speed == USB_SPEED_SUPER) {
1937 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
1938 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1939 dev->rx_qlen = 4;
1940 dev->tx_qlen = 4;
1941 } else if (dev->udev->speed == USB_SPEED_HIGH) {
1942 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
1943 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1944 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
1945 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
1946 } else {
1947 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1948 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1949 dev->rx_qlen = 4;
1950 }
1951
1952 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1953 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
1954
1955 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1956 buf |= HW_CFG_MEF_;
1957 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1958
1959 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1960 buf |= USB_CFG_BCE_;
1961 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1962
1963 /* set FIFO sizes */
1964 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
1965 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
1966
1967 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
1968 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
1969
1970 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
1971 ret = lan78xx_write_reg(dev, FLOW, 0);
1972 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
1973
1974 /* Don't need rfe_ctl_lock during initialisation */
1975 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1976 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
1977 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1978
1979 /* Enable or disable checksum offload engines */
1980 lan78xx_set_features(dev->net, dev->net->features);
1981
1982 lan78xx_set_multicast(dev->net);
1983
1984 /* reset PHY */
1985 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1986 buf |= PMT_CTL_PHY_RST_;
1987 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1988
1989 timeout = jiffies + HZ;
1990 do {
1991 mdelay(1);
1992 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1993 if (time_after(jiffies, timeout)) {
1994 netdev_warn(dev->net, "timeout waiting for PHY Reset");
1995 return -EIO;
1996 }
6c595b03 1997 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
55d7de9d 1998
55d7de9d 1999 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
55d7de9d 2000 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
55d7de9d
WH
2001 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2002
55d7de9d
WH
2003 /* enable PHY interrupts */
2004 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2005 buf |= INT_ENP_PHY_INT;
2006 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2007
2008 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2009 buf |= MAC_TX_TXEN_;
2010 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2011
2012 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2013 buf |= FCT_TX_CTL_EN_;
2014 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2015
2016 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2017
2018 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2019 buf |= MAC_RX_RXEN_;
2020 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2021
2022 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2023 buf |= FCT_RX_CTL_EN_;
2024 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2025
55d7de9d
WH
2026 return 0;
2027}
2028
2029static int lan78xx_open(struct net_device *net)
2030{
2031 struct lan78xx_net *dev = netdev_priv(net);
2032 int ret;
2033
2034 ret = usb_autopm_get_interface(dev->intf);
2035 if (ret < 0)
2036 goto out;
2037
2038 ret = lan78xx_reset(dev);
2039 if (ret < 0)
2040 goto done;
2041
ce85e13a
WH
2042 ret = lan78xx_phy_init(dev);
2043 if (ret < 0)
2044 goto done;
2045
55d7de9d
WH
2046 /* for Link Check */
2047 if (dev->urb_intr) {
2048 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2049 if (ret < 0) {
2050 netif_err(dev, ifup, dev->net,
2051 "intr submit %d\n", ret);
2052 goto done;
2053 }
2054 }
2055
2056 set_bit(EVENT_DEV_OPEN, &dev->flags);
2057
2058 netif_start_queue(net);
2059
2060 dev->link_on = false;
2061
2062 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2063done:
2064 usb_autopm_put_interface(dev->intf);
2065
2066out:
2067 return ret;
2068}
2069
2070static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2071{
2072 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2073 DECLARE_WAITQUEUE(wait, current);
2074 int temp;
2075
2076 /* ensure there are no more active urbs */
2077 add_wait_queue(&unlink_wakeup, &wait);
2078 set_current_state(TASK_UNINTERRUPTIBLE);
2079 dev->wait = &unlink_wakeup;
2080 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2081
2082 /* maybe wait for deletions to finish. */
2083 while (!skb_queue_empty(&dev->rxq) &&
2084 !skb_queue_empty(&dev->txq) &&
2085 !skb_queue_empty(&dev->done)) {
2086 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2087 set_current_state(TASK_UNINTERRUPTIBLE);
2088 netif_dbg(dev, ifdown, dev->net,
2089 "waited for %d urb completions\n", temp);
2090 }
2091 set_current_state(TASK_RUNNING);
2092 dev->wait = NULL;
2093 remove_wait_queue(&unlink_wakeup, &wait);
2094}
2095
2096int lan78xx_stop(struct net_device *net)
2097{
2098 struct lan78xx_net *dev = netdev_priv(net);
2099
ce85e13a
WH
2100 phy_stop(net->phydev);
2101 phy_disconnect(net->phydev);
2102 net->phydev = NULL;
2103
55d7de9d
WH
2104 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2105 netif_stop_queue(net);
2106
2107 netif_info(dev, ifdown, dev->net,
2108 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2109 net->stats.rx_packets, net->stats.tx_packets,
2110 net->stats.rx_errors, net->stats.tx_errors);
2111
2112 lan78xx_terminate_urbs(dev);
2113
2114 usb_kill_urb(dev->urb_intr);
2115
2116 skb_queue_purge(&dev->rxq_pause);
2117
2118 /* deferred work (task, timer, softirq) must also stop.
2119 * can't flush_scheduled_work() until we drop rtnl (later),
2120 * else workers could deadlock; so make workers a NOP.
2121 */
2122 dev->flags = 0;
2123 cancel_delayed_work_sync(&dev->wq);
2124 tasklet_kill(&dev->bh);
2125
2126 usb_autopm_put_interface(dev->intf);
2127
2128 return 0;
2129}
2130
/* Thin wrapper around skb_linearize(), kept for readable call sites. */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2135
2136static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2137 struct sk_buff *skb, gfp_t flags)
2138{
2139 u32 tx_cmd_a, tx_cmd_b;
2140
2141 if (skb_headroom(skb) < TX_OVERHEAD) {
2142 struct sk_buff *skb2;
2143
2144 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2145 dev_kfree_skb_any(skb);
2146 skb = skb2;
2147 if (!skb)
2148 return NULL;
2149 }
2150
2151 if (lan78xx_linearize(skb) < 0)
2152 return NULL;
2153
2154 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2155
2156 if (skb->ip_summed == CHECKSUM_PARTIAL)
2157 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2158
2159 tx_cmd_b = 0;
2160 if (skb_is_gso(skb)) {
2161 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2162
2163 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2164
2165 tx_cmd_a |= TX_CMD_A_LSO_;
2166 }
2167
2168 if (skb_vlan_tag_present(skb)) {
2169 tx_cmd_a |= TX_CMD_A_IVTG_;
2170 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2171 }
2172
2173 skb_push(skb, 4);
2174 cpu_to_le32s(&tx_cmd_b);
2175 memcpy(skb->data, &tx_cmd_b, 4);
2176
2177 skb_push(skb, 4);
2178 cpu_to_le32s(&tx_cmd_a);
2179 memcpy(skb->data, &tx_cmd_a, 4);
2180
2181 return skb;
2182}
2183
2184static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2185 struct sk_buff_head *list, enum skb_state state)
2186{
2187 unsigned long flags;
2188 enum skb_state old_state;
2189 struct skb_data *entry = (struct skb_data *)skb->cb;
2190
2191 spin_lock_irqsave(&list->lock, flags);
2192 old_state = entry->state;
2193 entry->state = state;
55d7de9d
WH
2194
2195 __skb_unlink(skb, list);
2196 spin_unlock(&list->lock);
2197 spin_lock(&dev->done.lock);
55d7de9d
WH
2198
2199 __skb_queue_tail(&dev->done, skb);
2200 if (skb_queue_len(&dev->done) == 1)
2201 tasklet_schedule(&dev->bh);
2202 spin_unlock_irqrestore(&dev->done.lock, flags);
2203
2204 return old_state;
2205}
2206
2207static void tx_complete(struct urb *urb)
2208{
2209 struct sk_buff *skb = (struct sk_buff *)urb->context;
2210 struct skb_data *entry = (struct skb_data *)skb->cb;
2211 struct lan78xx_net *dev = entry->dev;
2212
2213 if (urb->status == 0) {
2214 dev->net->stats.tx_packets++;
2215 dev->net->stats.tx_bytes += entry->length;
2216 } else {
2217 dev->net->stats.tx_errors++;
2218
2219 switch (urb->status) {
2220 case -EPIPE:
2221 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2222 break;
2223
2224 /* software-driven interface shutdown */
2225 case -ECONNRESET:
2226 case -ESHUTDOWN:
2227 break;
2228
2229 case -EPROTO:
2230 case -ETIME:
2231 case -EILSEQ:
2232 netif_stop_queue(dev->net);
2233 break;
2234 default:
2235 netif_dbg(dev, tx_err, dev->net,
2236 "tx err %d\n", entry->urb->status);
2237 break;
2238 }
2239 }
2240
2241 usb_autopm_put_interface_async(dev->intf);
2242
81c38e81 2243 defer_bh(dev, skb, &dev->txq, tx_done);
55d7de9d
WH
2244}
2245
2246static void lan78xx_queue_skb(struct sk_buff_head *list,
2247 struct sk_buff *newsk, enum skb_state state)
2248{
2249 struct skb_data *entry = (struct skb_data *)newsk->cb;
2250
2251 __skb_queue_tail(list, newsk);
2252 entry->state = state;
2253}
2254
2255netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2256{
2257 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2258 struct sk_buff *skb2 = NULL;
55d7de9d 2259
81c38e81 2260 if (skb) {
55d7de9d 2261 skb_tx_timestamp(skb);
81c38e81
WH
2262 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2263 }
55d7de9d 2264
81c38e81
WH
2265 if (skb2) {
2266 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2267
4b2a4a96
WH
2268 /* throttle TX patch at slower than SUPER SPEED USB */
2269 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2270 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2271 netif_stop_queue(net);
2272 } else {
2273 netif_dbg(dev, tx_err, dev->net,
2274 "lan78xx_tx_prep return NULL\n");
2275 dev->net->stats.tx_errors++;
2276 dev->net->stats.tx_dropped++;
2277 }
2278
2279 tasklet_schedule(&dev->bh);
2280
2281 return NETDEV_TX_OK;
2282}
2283
/* Scan the interface's altsettings for the first one providing both
 * a bulk-IN and a bulk-OUT endpoint; an interrupt-IN endpoint (status
 * notifications) is recorded if present.  On success the bulk pipes
 * are cached in @dev and 0 is returned, otherwise -EINVAL.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is of interest */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* this altsetting is usable; stop searching */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2341
2342static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2343{
2344 struct lan78xx_priv *pdata = NULL;
2345 int ret;
2346 int i;
2347
2348 ret = lan78xx_get_endpoints(dev, intf);
2349
2350 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2351
2352 pdata = (struct lan78xx_priv *)(dev->data[0]);
2353 if (!pdata) {
2354 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2355 return -ENOMEM;
2356 }
2357
2358 pdata->dev = dev;
2359
2360 spin_lock_init(&pdata->rfe_ctl_lock);
2361 mutex_init(&pdata->dataport_mutex);
2362
2363 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2364
2365 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2366 pdata->vlan_table[i] = 0;
2367
2368 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2369
2370 dev->net->features = 0;
2371
2372 if (DEFAULT_TX_CSUM_ENABLE)
2373 dev->net->features |= NETIF_F_HW_CSUM;
2374
2375 if (DEFAULT_RX_CSUM_ENABLE)
2376 dev->net->features |= NETIF_F_RXCSUM;
2377
2378 if (DEFAULT_TSO_CSUM_ENABLE)
2379 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2380
2381 dev->net->hw_features = dev->net->features;
2382
2383 /* Init all registers */
2384 ret = lan78xx_reset(dev);
2385
ce85e13a
WH
2386 lan78xx_mdio_init(dev);
2387
55d7de9d
WH
2388 dev->net->flags |= IFF_MULTICAST;
2389
2390 pdata->wol = WAKE_MAGIC;
2391
2392 return 0;
2393}
2394
2395static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2396{
2397 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2398
ce85e13a
WH
2399 lan78xx_remove_mdio(dev);
2400
55d7de9d
WH
2401 if (pdata) {
2402 netif_dbg(dev, ifdown, dev->net, "free pdata");
2403 kfree(pdata);
2404 pdata = NULL;
2405 dev->data[0] = 0;
2406 }
2407}
2408
/* Propagate the hardware-computed RX checksum to the skb.  When RX
 * checksum offload is off, or the device flagged the checksum as
 * incomplete (RX_CMD_A_ICSM_), fall back to CHECKSUM_NONE so the
 * stack verifies it in software.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* NOTE(review): checksum comes from the upper 16 bits of
		 * rx_cmd_b; applying ntohs() to an already host-order
		 * value looks suspicious for CHECKSUM_COMPLETE — confirm
		 * the required byte order against the datasheet.
		 */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
2421
/* Deliver a received frame to the network stack (or park it on
 * rxq_pause while RX is paused) and update RX statistics.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb still holds skb_data from the URB path; wipe it before
	 * the stack reuses the control block
	 */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* skb consumed by the RX timestamping machinery */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2447
/* Parse one bulk-IN buffer, which may batch several frames.  Each
 * frame is preceded by RX command words A/B (32 bit) and C (16 bit),
 * and frames are padded to 4-byte alignment (RXW_PADDING included).
 * The final frame is left in @skb itself (the caller pushes it up
 * when skb->len != 0); earlier frames are cloned and handed to
 * lan78xx_skb_return() directly.  Returns 1 on success, 0 on a
 * malformed buffer or clone-allocation failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware reported a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this one frame */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2519
2520static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2521{
2522 if (!lan78xx_rx(dev, skb)) {
2523 dev->net->stats.rx_errors++;
2524 goto done;
2525 }
2526
2527 if (skb->len) {
2528 lan78xx_skb_return(dev, skb);
2529 return;
2530 }
2531
2532 netif_dbg(dev, rx_err, dev->net, "drop\n");
2533 dev->net->stats.rx_errors++;
2534done:
2535 skb_queue_tail(&dev->done, skb);
2536}
2537
2538static void rx_complete(struct urb *urb);
2539
/* Attach a freshly-allocated skb to @urb, fill it as a bulk-IN
 * transfer and submit it.  The state checks and the submit are done
 * under rxq.lock so unlinking cannot race.  Consumes @urb: it is
 * freed here on any failure.  Returns 0, -ENOMEM, -ENOLINK (device
 * stopped or unreachable) or a usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* NB: submits with GFP_ATOMIC regardless of @flags */
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint; clear from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2599
/* Completion handler for bulk-IN URBs (interrupt context).  Classify
 * the URB status, hand the skb to the tasklet via defer_bh(), then
 * resubmit the URB unless the device is halted or shutting down.  On
 * error paths the URB is parked in entry->urb so lan78xx_bh() frees
 * it together with the skb.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt buffer: count it and recycle */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:	/* async unlink */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb with the skb; the tasklet frees both */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* recycle the urb for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2669
/* TX pump, called from the tasklet.  Coalesces skbs queued on
 * txq_pend into a single bulk-OUT transfer: a GSO skb at the head of
 * the queue is sent on its own, otherwise up to
 * MAX_SINGLE_PACKET_SIZE bytes of pending frames are copied
 * back-to-back (each padded to 4-byte alignment) into one fresh skb.
 * While the device is autosuspended the URB is parked on
 * dev->deferred and submitted from lan78xx_resume().
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	/* size the batch: stop at the first GSO skb or when the
	 * aggregate would exceed MAX_SINGLE_PACKET_SIZE
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* pkt_cnt == 0 here, so the iterator is the queue
			 * head and skb == skb2 after the dequeue below
			 */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* hold an autosuspend reference for the in-flight transfer;
	 * released in tx_complete() or on the error paths below
	 */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2793
2794static void lan78xx_rx_bh(struct lan78xx_net *dev)
2795{
2796 struct urb *urb;
2797 int i;
2798
2799 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2800 for (i = 0; i < 10; i++) {
2801 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2802 break;
2803 urb = usb_alloc_urb(0, GFP_ATOMIC);
2804 if (urb)
2805 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2806 return;
2807 }
2808
2809 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2810 tasklet_schedule(&dev->bh);
2811 }
2812 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2813 netif_wake_queue(dev->net);
2814}
2815
/* Driver bottom half (tasklet).  Drains the done list — freeing TX
 * resources and pushing completed RX buffers up the stack — then, if
 * the interface is up, kicks the TX and RX pumps.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state; bail out rather than loop */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2852
/* Deferred work (process context): services events flagged from
 * interrupt context — clearing stalled bulk endpoints
 * (EVENT_TX_HALT / EVENT_RX_HALT) and performing link resets
 * (EVENT_LINK_RESET).
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				/* also reached via goto when autopm fails,
				 * bypassing the msg-level check above
				 */
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				/* same goto-into-if pattern as fail_pipe */
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): ret is always 0 here; the message
			 * never carries the real error code
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2919
/* Completion handler for the interrupt-IN (status) URB.  On success
 * the status buffer is processed; the URB is resubmitted unless the
 * interface went down.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* zero the buffer so stale status is never re-processed */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
2955
/* USB disconnect callback: tear down in reverse order of probe.  The
 * NULL check on intfdata makes this safe if probe never completed.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs parked while suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
2984
/* ndo_tx_timeout: the netdev watchdog fired — unlink all in-flight
 * TX URBs and let the tasklet restart transmission.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2992
/* net_device operations for LAN78xx interfaces */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
3007
3008static int lan78xx_probe(struct usb_interface *intf,
3009 const struct usb_device_id *id)
3010{
3011 struct lan78xx_net *dev;
3012 struct net_device *netdev;
3013 struct usb_device *udev;
3014 int ret;
3015 unsigned maxp;
3016 unsigned period;
3017 u8 *buf = NULL;
3018
3019 udev = interface_to_usbdev(intf);
3020 udev = usb_get_dev(udev);
3021
3022 ret = -ENOMEM;
3023 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3024 if (!netdev) {
3025 dev_err(&intf->dev, "Error: OOM\n");
3026 goto out1;
3027 }
3028
3029 /* netdev_printk() needs this */
3030 SET_NETDEV_DEV(netdev, &intf->dev);
3031
3032 dev = netdev_priv(netdev);
3033 dev->udev = udev;
3034 dev->intf = intf;
3035 dev->net = netdev;
3036 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3037 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3038
3039 skb_queue_head_init(&dev->rxq);
3040 skb_queue_head_init(&dev->txq);
3041 skb_queue_head_init(&dev->done);
3042 skb_queue_head_init(&dev->rxq_pause);
3043 skb_queue_head_init(&dev->txq_pend);
3044 mutex_init(&dev->phy_mutex);
3045
3046 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3047 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3048 init_usb_anchor(&dev->deferred);
3049
3050 netdev->netdev_ops = &lan78xx_netdev_ops;
3051 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3052 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3053
3054 ret = lan78xx_bind(dev, intf);
3055 if (ret < 0)
3056 goto out2;
3057 strcpy(netdev->name, "eth%d");
3058
3059 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3060 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3061
3062 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3063 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3064 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3065
3066 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3067 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3068
3069 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3070 dev->ep_intr->desc.bEndpointAddress &
3071 USB_ENDPOINT_NUMBER_MASK);
3072 period = dev->ep_intr->desc.bInterval;
3073
3074 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3075 buf = kmalloc(maxp, GFP_KERNEL);
3076 if (buf) {
3077 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3078 if (!dev->urb_intr) {
3079 kfree(buf);
3080 goto out3;
3081 } else {
3082 usb_fill_int_urb(dev->urb_intr, dev->udev,
3083 dev->pipe_intr, buf, maxp,
3084 intr_complete, dev, period);
3085 }
3086 }
3087
3088 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3089
3090 /* driver requires remote-wakeup capability during autosuspend. */
3091 intf->needs_remote_wakeup = 1;
3092
3093 ret = register_netdev(netdev);
3094 if (ret != 0) {
3095 netif_err(dev, probe, netdev, "couldn't register the device\n");
3096 goto out2;
3097 }
3098
3099 usb_set_intfdata(intf, dev);
3100
3101 ret = device_set_wakeup_enable(&udev->dev, true);
3102
3103 /* Default delay of 2sec has more overhead than advantage.
3104 * Set to 10sec as default.
3105 */
3106 pm_runtime_set_autosuspend_delay(&udev->dev,
3107 DEFAULT_AUTOSUSPEND_DELAY);
3108
3109 return 0;
3110
55d7de9d
WH
3111out3:
3112 lan78xx_unbind(dev, intf);
3113out2:
3114 free_netdev(netdev);
3115out1:
3116 usb_put_dev(udev);
3117
3118 return ret;
3119}
3120
3121static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3122{
3123 const u16 crc16poly = 0x8005;
3124 int i;
3125 u16 bit, crc, msb;
3126 u8 data;
3127
3128 crc = 0xFFFF;
3129 for (i = 0; i < len; i++) {
3130 data = *buf++;
3131 for (bit = 0; bit < 8; bit++) {
3132 msb = crc >> 15;
3133 crc <<= 1;
3134
3135 if (msb ^ (u16)(data & 1)) {
3136 crc ^= crc16poly;
3137 crc |= (u16)0x0001U;
3138 }
3139 data >>= 1;
3140 }
3141 }
3142
3143 return crc;
3144}
3145
/* Program wake-up sources for system suspend according to the WoL
 * mask @wol (WAKE_PHY / MAGIC / BCAST / MCAST / UCAST / ARP):
 * disable MAC TX/RX, clear stale wake status, set up the wake-frame
 * filters and PMT_CTL suspend mode, then re-enable the receiver so
 * wake frames can be seen.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop MAC TX and RX while reconfiguring */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake-up control and status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* reset all wake-up frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* NOTE(review): magic packet selects SUS_MODE_3 while all
		 * other events use SUS_MODE_0 — confirm against datasheet
		 */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames are seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3288
/* USB suspend callback.  On the first suspend: refuse autosuspend
 * while TX is in flight, stop the MAC, detach the netdev and kill
 * all URBs.  Then arm wake-up: for autosuspend, wake on any good
 * frame (SUS_MODE_3); for system sleep, program the user-configured
 * WoL events from pdata->wol.  Returns 0 or -EBUSY.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	/* NOTE(review): event is assigned but never used below */
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake control/status before re-arming */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear wake-up status bits (write-1-to-clear) */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep RX enabled so wake frames are received */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: program user-configured WoL */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3382
/* USB resume callback: restart the interrupt URB, submit any TX URBs
 * deferred while asleep, clear/acknowledge wake-up status registers
 * and re-enable the transmitter.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* submit the TX URBs parked by lan78xx_tx_bh() */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* drop the autopm ref taken at queue time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* acknowledge/clear all recorded wake-up sources */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3443
/* Resume after a device reset: register and PHY state were lost, so
 * re-run the chip reset and PHY init before the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3454
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3467
/* USB driver glue: supports autosuspend and disables hub-initiated
 * link power management.
 */
static struct usb_driver lan78xx_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = lan78xx_probe,
	.disconnect = lan78xx_disconnect,
	.suspend = lan78xx_suspend,
	.resume = lan78xx_resume,
	.reset_resume = lan78xx_reset_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
This page took 0.199786 seconds and 5 git commands to generate.