Merge branch 'cpuidle' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux...
[deliverable/linux.git] / drivers / net / usb / lan78xx.c
1 /*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35
36 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME "lan78xx"
39 #define DRIVER_VERSION "1.0.2"
40
41 #define TX_TIMEOUT_JIFFIES (5 * HZ)
42 #define THROTTLE_JIFFIES (HZ / 8)
43 #define UNLINK_TIMEOUT_MS 3
44
45 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
46
47 #define SS_USB_PKT_SIZE (1024)
48 #define HS_USB_PKT_SIZE (512)
49 #define FS_USB_PKT_SIZE (64)
50
51 #define MAX_RX_FIFO_SIZE (12 * 1024)
52 #define MAX_TX_FIFO_SIZE (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE (9000)
56 #define DEFAULT_TX_CSUM_ENABLE (true)
57 #define DEFAULT_RX_CSUM_ENABLE (true)
58 #define DEFAULT_TSO_CSUM_ENABLE (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE (true)
60 #define TX_OVERHEAD (8)
61 #define RXW_PADDING 2
62
63 #define LAN78XX_USB_VENDOR_ID (0x0424)
64 #define LAN7800_USB_PRODUCT_ID (0x7800)
65 #define LAN7850_USB_PRODUCT_ID (0x7850)
66 #define LAN78XX_EEPROM_MAGIC (0x78A5)
67 #define LAN78XX_OTP_MAGIC (0x78F3)
68
69 #define MII_READ 1
70 #define MII_WRITE 0
71
72 #define EEPROM_INDICATOR (0xA5)
73 #define EEPROM_MAC_OFFSET (0x01)
74 #define MAX_EEPROM_SIZE 512
75 #define OTP_INDICATOR_1 (0xF3)
76 #define OTP_INDICATOR_2 (0xF7)
77
78 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
81
82 /* USB related defines */
83 #define BULK_IN_PIPE 1
84 #define BULK_OUT_PIPE 2
85
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88
/* ethtool statistics names, reported via lan78xx_get_strings()/get_stats().
 * NOTE(review): the order appears to mirror struct lan78xx_statstage
 * field-for-field -- keep the two in sync when adding counters.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138
/* Hardware statistics block as returned by the
 * USB_VENDOR_REQUEST_GET_STATS vendor command (see lan78xx_read_stats()).
 * All counters are 32-bit and arrive little-endian from the device;
 * lan78xx_read_stats() byte-swaps them in place.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188
189 struct lan78xx_net;
190
/* Driver-private state stored in lan78xx_net::data[0]; holds the
 * receive-filter configuration that the deferred work items flush
 * to the hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;	/* enabled WAKE_* wake-on-LAN sources */
};
203
/* Lifecycle states of an skb/URB pair, kept in skb->cb (struct skb_data). */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb's transfer */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where in its lifecycle the skb is */
	size_t length;		/* payload length of the transfer */
};
220
/* Context for an asynchronous USB control request: the request header
 * plus the owning device.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
225
226 #define EVENT_TX_HALT 0
227 #define EVENT_RX_HALT 1
228 #define EVENT_RX_MEMORY 2
229 #define EVENT_STS_SPLIT 3
230 #define EVENT_LINK_RESET 4
231 #define EVENT_RX_PAUSED 5
232 #define EVENT_DEV_WAKING 6
233 #define EVENT_DEV_ASLEEP 7
234 #define EVENT_DEV_OPEN 8
235
/* Per-device driver state; this is netdev_priv() of the net_device. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;	/* rx urb queue depth */
	int tx_qlen;	/* tx urb queue depth */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;	/* completed urbs awaiting the tasklet */
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;		/* kevent work; see lan78xx_defer_kevent() */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;	/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;

	/* data[0] holds a struct lan78xx_priv pointer (cast on every use) */
	unsigned long data[5];

	int link_on;	/* last link state observed by lan78xx_link_reset() */
	u8 mdix_ctrl;

	u32 devid;	/* ID_REV value; chip id compared via ID_REV_CHIP_ID_MASK_ */
	struct mii_bus *mdiobus;
};
284
/* use ethtool to change the level for any given device */
/* NOTE(review): -1 appears to mean "keep the netif default level" --
 * confirm against the netif_msg_init() call site.
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
289
290 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291 {
292 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
293 int ret;
294
295 if (!buf)
296 return -ENOMEM;
297
298 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
299 USB_VENDOR_REQUEST_READ_REGISTER,
300 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
301 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
302 if (likely(ret >= 0)) {
303 le32_to_cpus(buf);
304 *data = *buf;
305 } else {
306 netdev_warn(dev->net,
307 "Failed to read register index 0x%08x. ret = %d",
308 index, ret);
309 }
310
311 kfree(buf);
312
313 return ret;
314 }
315
316 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317 {
318 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
319 int ret;
320
321 if (!buf)
322 return -ENOMEM;
323
324 *buf = data;
325 cpu_to_le32s(buf);
326
327 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
328 USB_VENDOR_REQUEST_WRITE_REGISTER,
329 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
330 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
331 if (unlikely(ret < 0)) {
332 netdev_warn(dev->net,
333 "Failed to write register index 0x%08x. ret = %d",
334 index, ret);
335 }
336
337 kfree(buf);
338
339 return ret;
340 }
341
342 static int lan78xx_read_stats(struct lan78xx_net *dev,
343 struct lan78xx_statstage *data)
344 {
345 int ret = 0;
346 int i;
347 struct lan78xx_statstage *stats;
348 u32 *src;
349 u32 *dst;
350
351 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
352 if (!stats)
353 return -ENOMEM;
354
355 ret = usb_control_msg(dev->udev,
356 usb_rcvctrlpipe(dev->udev, 0),
357 USB_VENDOR_REQUEST_GET_STATS,
358 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
359 0,
360 0,
361 (void *)stats,
362 sizeof(*stats),
363 USB_CTRL_SET_TIMEOUT);
364 if (likely(ret >= 0)) {
365 src = (u32 *)stats;
366 dst = (u32 *)data;
367 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
368 le32_to_cpus(&src[i]);
369 dst[i] = src[i];
370 }
371 } else {
372 netdev_warn(dev->net,
373 "Failed to read stat ret = 0x%x", ret);
374 }
375
376 kfree(stats);
377
378 return ret;
379 }
380
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
383 {
384 unsigned long start_time = jiffies;
385 u32 val;
386 int ret;
387
388 do {
389 ret = lan78xx_read_reg(dev, MII_ACC, &val);
390 if (unlikely(ret < 0))
391 return -EIO;
392
393 if (!(val & MII_ACC_MII_BUSY_))
394 return 0;
395 } while (!time_after(jiffies, start_time + HZ));
396
397 return -EIO;
398 }
399
400 static inline u32 mii_access(int id, int index, int read)
401 {
402 u32 ret;
403
404 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406 if (read)
407 ret |= MII_ACC_MII_READ_;
408 else
409 ret |= MII_ACC_MII_WRITE_;
410 ret |= MII_ACC_MII_BUSY_;
411
412 return ret;
413 }
414
/* Wait (up to ~1s) for the current EEPROM command to finish: polls
 * E2P_CMD until the busy bit clears or the controller flags its own
 * timeout.  Returns 0 on completion, -EIO on register error, device
 * timeout, or still-busy after the deadline.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* either bit still set means the command did not complete cleanly */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
439
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
441 {
442 unsigned long start_time = jiffies;
443 u32 val;
444 int ret;
445
446 do {
447 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
448 if (unlikely(ret < 0))
449 return -EIO;
450
451 if (!(val & E2P_CMD_EPC_BUSY_))
452 return 0;
453
454 usleep_range(40, 100);
455 } while (!time_after(jiffies, start_time + HZ));
456
457 netdev_warn(dev->net, "EEPROM is busy");
458 return -EIO;
459 }
460
461 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
462 u32 length, u8 *data)
463 {
464 u32 val;
465 u32 saved;
466 int i, ret;
467 int retval;
468
469 /* depends on chip, some EEPROM pins are muxed with LED function.
470 * disable & restore LED function to access EEPROM.
471 */
472 ret = lan78xx_read_reg(dev, HW_CFG, &val);
473 saved = val;
474 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
475 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
476 ret = lan78xx_write_reg(dev, HW_CFG, val);
477 }
478
479 retval = lan78xx_eeprom_confirm_not_busy(dev);
480 if (retval)
481 return retval;
482
483 for (i = 0; i < length; i++) {
484 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
485 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
486 ret = lan78xx_write_reg(dev, E2P_CMD, val);
487 if (unlikely(ret < 0)) {
488 retval = -EIO;
489 goto exit;
490 }
491
492 retval = lan78xx_wait_eeprom(dev);
493 if (retval < 0)
494 goto exit;
495
496 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
497 if (unlikely(ret < 0)) {
498 retval = -EIO;
499 goto exit;
500 }
501
502 data[i] = val & 0xFF;
503 offset++;
504 }
505
506 retval = 0;
507 exit:
508 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
509 ret = lan78xx_write_reg(dev, HW_CFG, saved);
510
511 return retval;
512 }
513
514 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
515 u32 length, u8 *data)
516 {
517 u8 sig;
518 int ret;
519
520 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
521 if ((ret == 0) && (sig == EEPROM_INDICATOR))
522 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
523 else
524 ret = -EINVAL;
525
526 return ret;
527 }
528
/* Write @length bytes starting at @offset to the external EEPROM.
 *
 * Sequence: save HW_CFG and disable muxed LED pins (LAN7800 parts),
 * confirm the controller is idle, issue a write/erase-enable (EWEN)
 * command, then per byte: load E2P_DATA, issue WRITE, and wait for
 * completion.  HW_CFG is restored at 'exit' on every path.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved on entry */
	if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
595
/* Read @length bytes from OTP memory starting at raw @offset.
 *
 * Powers the OTP block up if it is in power-down (waits up to ~1s for
 * the PWRDN bit to clear), then per byte: program the split address
 * registers, issue a READ via OTP_CMD_GO, poll OTP_STATUS until not
 * busy, and fetch the byte from OTP_RD_DATA.
 *
 * Returns 0 on success, -EIO on a poll timeout.
 * NOTE(review): individual register read/write results ('ret') are
 * not checked here -- confirm that is acceptable for this path.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: high bits in ADDR1, low bits in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
649
/* Program @length bytes of OTP memory starting at raw @offset.
 *
 * Same power-up dance as lan78xx_read_raw_otp(), then BYTE program
 * mode is selected and each byte is written via the program/verify
 * test command, polling OTP_STATUS between bytes.
 *
 * Returns 0 on success, -EIO on a poll timeout.
 * NOTE(review): as in the read path, individual register access
 * results ('ret') are not checked.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address is split: high bits in ADDR1, low bits in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
702
703 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
704 u32 length, u8 *data)
705 {
706 u8 sig;
707 int ret;
708
709 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
710
711 if (ret == 0) {
712 if (sig == OTP_INDICATOR_1)
713 offset = offset;
714 else if (sig == OTP_INDICATOR_2)
715 offset += 0x100;
716 else
717 ret = -EINVAL;
718 ret = lan78xx_read_raw_otp(dev, offset, length, data);
719 }
720
721 return ret;
722 }
723
724 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
725 {
726 int i, ret;
727
728 for (i = 0; i < 100; i++) {
729 u32 dp_sel;
730
731 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
732 if (unlikely(ret < 0))
733 return -EIO;
734
735 if (dp_sel & DP_SEL_DPRDY_)
736 return 0;
737
738 usleep_range(40, 100);
739 }
740
741 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
742
743 return -EIO;
744 }
745
/* Write @length words from @buf into internal RAM selected by
 * @ram_select, starting at @addr, via the DP_* dataport registers.
 * Serialized by pdata->dataport_mutex; the device is woken through
 * runtime PM for the duration of the access.
 *
 * NOTE(review): a usb_autopm_get_interface() failure returns 0
 * (success) without doing anything -- confirm callers are happy with
 * that silent best-effort behavior.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	/* select the target RAM block, preserving the other DP_SEL bits */
	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
786
787 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
788 int index, u8 addr[ETH_ALEN])
789 {
790 u32 temp;
791
792 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
793 temp = addr[3];
794 temp = addr[2] | (temp << 8);
795 temp = addr[1] | (temp << 8);
796 temp = addr[0] | (temp << 8);
797 pdata->pfilter_table[index][1] = temp;
798 temp = addr[5];
799 temp = addr[4] | (temp << 8);
800 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
801 pdata->pfilter_table[index][0] = temp;
802 }
803 }
804
805 /* returns hash bit number for given MAC address */
806 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
807 {
808 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
809 }
810
/* Work item: flush the receive-filter shadow state captured by
 * lan78xx_set_multicast() to the hardware.  Runs in process context
 * because the register/dataport writes sleep; writes the multicast
 * hash table, the perfect-filter registers (MAF_HI cleared first so
 * the entry is invalid while MAF_LO changes), then RFE_CTL.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
835
836 static void lan78xx_set_multicast(struct net_device *netdev)
837 {
838 struct lan78xx_net *dev = netdev_priv(netdev);
839 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
840 unsigned long flags;
841 int i;
842
843 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
844
845 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
846 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
847
848 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
849 pdata->mchash_table[i] = 0;
850 /* pfilter_table[0] has own HW address */
851 for (i = 1; i < NUM_OF_MAF; i++) {
852 pdata->pfilter_table[i][0] =
853 pdata->pfilter_table[i][1] = 0;
854 }
855
856 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
857
858 if (dev->net->flags & IFF_PROMISC) {
859 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
860 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
861 } else {
862 if (dev->net->flags & IFF_ALLMULTI) {
863 netif_dbg(dev, drv, dev->net,
864 "receive all multicast enabled");
865 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
866 }
867 }
868
869 if (netdev_mc_count(dev->net)) {
870 struct netdev_hw_addr *ha;
871 int i;
872
873 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
874
875 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
876
877 i = 1;
878 netdev_for_each_mc_addr(ha, netdev) {
879 /* set first 32 into Perfect Filter */
880 if (i < 33) {
881 lan78xx_set_addr_filter(pdata, i, ha->addr);
882 } else {
883 u32 bitnum = lan78xx_hash(ha->addr);
884
885 pdata->mchash_table[bitnum / 32] |=
886 (1 << (bitnum % 32));
887 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
888 }
889 i++;
890 }
891 }
892
893 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
894
895 /* defer register writes to a sleepable context */
896 schedule_work(&pdata->set_multicast);
897 }
898
/* Program MAC pause/flow-control from the resolved autonegotiation
 * result (@lcladv/@rmtadv).  Always returns 0.
 *
 * NOTE(review): the FCT_FLOW values 0x817 (SuperSpeed) and 0x211
 * (HighSpeed) look like vendor-supplied FIFO thresholds -- confirm
 * against the datasheet before changing.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	/* 0xFFFF: maximum pause time in the FLOW register's low bits */
	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
929
/* Handle a PHY interrupt: acknowledge it, re-read the PHY state and
 * react to link transitions.  On link-down the MAC is reset; on
 * link-up the USB3 U1/U2 power states are tuned for the negotiated
 * speed and flow control is reprogrammed from the advertisement
 * registers.  Returns 0/positive on success, negative errno on error.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		/* at gigabit, U2 exit latency is too costly; otherwise
		 * both U1 and U2 low-power states can stay enabled
		 */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);
	}

	return ret;
}
1006
1007 /* some work can't be done in tasklets, so we use keventd
1008 *
1009 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1010 * but tasklet_schedule() doesn't. hope the failure is rare.
1011 */
/* Record event @work in dev->flags and kick the shared delayed-work
 * item so the handler runs in process context.  If the work was
 * already queued the new scheduling attempt fails, which is harmless
 * (the pending run will see the bit) but is logged just in case.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1018
1019 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1020 {
1021 u32 intdata;
1022
1023 if (urb->actual_length != 4) {
1024 netdev_warn(dev->net,
1025 "unexpected urb length %d", urb->actual_length);
1026 return;
1027 }
1028
1029 memcpy(&intdata, urb->transfer_buffer, 4);
1030 le32_to_cpus(&intdata);
1031
1032 if (intdata & INT_ENP_PHY_INT) {
1033 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1034 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1035 } else
1036 netdev_warn(dev->net,
1037 "unexpected interrupt: 0x%08x\n", intdata);
1038 }
1039
/* ethtool get_eeprom_len: the exposed EEPROM size is fixed. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1044
/* ethtool get_eeprom: raw read of @ee->len bytes at @ee->offset.
 * The magic is set so userspace can validate a later set_eeprom.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1054
1055 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1056 struct ethtool_eeprom *ee, u8 *data)
1057 {
1058 struct lan78xx_net *dev = netdev_priv(netdev);
1059
1060 /* Allow entire eeprom update only */
1061 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1062 (ee->offset == 0) &&
1063 (ee->len == 512) &&
1064 (data[0] == EEPROM_INDICATOR))
1065 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1066 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1067 (ee->offset == 0) &&
1068 (ee->len == 512) &&
1069 (data[0] == OTP_INDICATOR_1))
1070 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1071
1072 return -EINVAL;
1073 }
1074
1075 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1076 u8 *data)
1077 {
1078 if (stringset == ETH_SS_STATS)
1079 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1080 }
1081
1082 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1083 {
1084 if (sset == ETH_SS_STATS)
1085 return ARRAY_SIZE(lan78xx_gstrings);
1086 else
1087 return -EOPNOTSUPP;
1088 }
1089
/* ethtool get_ethtool_stats: fetch the hardware counter block and
 * widen each u32 counter into the u64 ethtool array.  On any failure
 * (runtime-PM wakeup or USB read) the output is simply left untouched.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_statstage lan78xx_stat;
	u32 *p;
	int i;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
		p = (u32 *)&lan78xx_stat;
		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
			data[i] = p[i];
	}

	usb_autopm_put_interface(dev->intf);
}
1109
/* ethtool get_wol: report Wake-on-LAN capability and the currently
 * enabled sources.  Capability is gated on the remote-wakeup bit in
 * USB_CFG0; if that read fails or the bit is clear, WoL is reported
 * as unsupported.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1137
1138 static int lan78xx_set_wol(struct net_device *netdev,
1139 struct ethtool_wolinfo *wol)
1140 {
1141 struct lan78xx_net *dev = netdev_priv(netdev);
1142 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1143 int ret;
1144
1145 ret = usb_autopm_get_interface(dev->intf);
1146 if (ret < 0)
1147 return ret;
1148
1149 pdata->wol = 0;
1150 if (wol->wolopts & WAKE_UCAST)
1151 pdata->wol |= WAKE_UCAST;
1152 if (wol->wolopts & WAKE_MCAST)
1153 pdata->wol |= WAKE_MCAST;
1154 if (wol->wolopts & WAKE_BCAST)
1155 pdata->wol |= WAKE_BCAST;
1156 if (wol->wolopts & WAKE_MAGIC)
1157 pdata->wol |= WAKE_MAGIC;
1158 if (wol->wolopts & WAKE_PHY)
1159 pdata->wol |= WAKE_PHY;
1160 if (wol->wolopts & WAKE_ARP)
1161 pdata->wol |= WAKE_ARP;
1162
1163 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1164
1165 phy_ethtool_set_wol(netdev->phydev, wol);
1166
1167 usb_autopm_put_interface(dev->intf);
1168
1169 return ret;
1170 }
1171
/* ethtool get_eee: combine the PHY's EEE state with the MAC's EEE
 * enable bit.  "Active" is derived from the advertisement overlap;
 * the MAC's LPI request delay register doubles as tx_lpi_timer (both
 * are in microseconds).  Returns 0 on success or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1209
/* ethtool set_eee: enable or disable Energy Efficient Ethernet in both
 * the MAC and the PHY.  When enabling, the LPI request delay register
 * is programmed from edata->tx_lpi_timer (same microsecond unit).
 * NOTE(review): register return codes are ignored and the function
 * always returns 0 once the interface is resumed.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer are both in uSec */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1239
1240 static u32 lan78xx_get_link(struct net_device *net)
1241 {
1242 phy_read_status(net->phydev);
1243
1244 return net->phydev->link;
1245 }
1246
1247 int lan78xx_nway_reset(struct net_device *net)
1248 {
1249 return phy_start_aneg(net->phydev);
1250 }
1251
1252 static void lan78xx_get_drvinfo(struct net_device *net,
1253 struct ethtool_drvinfo *info)
1254 {
1255 struct lan78xx_net *dev = netdev_priv(net);
1256
1257 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1258 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1259 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1260 }
1261
1262 static u32 lan78xx_get_msglevel(struct net_device *net)
1263 {
1264 struct lan78xx_net *dev = netdev_priv(net);
1265
1266 return dev->msg_enable;
1267 }
1268
1269 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1270 {
1271 struct lan78xx_net *dev = netdev_priv(net);
1272
1273 dev->msg_enable = level;
1274 }
1275
/* Read the raw MDI/MDI-X control register from the PHY.
 * The register lives in extended page space 1, so the page is switched
 * around the read and restored to page 0 afterwards.
 * Returns the raw LAN88XX_EXT_MODE_CTRL value (or a phy_read error).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1287
1288 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1289 {
1290 struct lan78xx_net *dev = netdev_priv(net);
1291 struct phy_device *phydev = net->phydev;
1292 int buf;
1293
1294 if (mdix_ctrl == ETH_TP_MDI) {
1295 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1296 LAN88XX_EXT_PAGE_SPACE_1);
1297 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1298 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1299 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1300 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1301 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1302 LAN88XX_EXT_PAGE_SPACE_0);
1303 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1304 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1305 LAN88XX_EXT_PAGE_SPACE_1);
1306 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1307 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1308 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1309 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1310 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1311 LAN88XX_EXT_PAGE_SPACE_0);
1312 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1313 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1314 LAN88XX_EXT_PAGE_SPACE_1);
1315 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1316 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1317 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1318 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1319 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1320 LAN88XX_EXT_PAGE_SPACE_0);
1321 }
1322 dev->mdix_ctrl = mdix_ctrl;
1323 }
1324
/* ethtool get_settings: report speed/duplex/autoneg from phylib plus
 * the current MDI/MDI-X selection read back from the PHY.  Unknown
 * MDI-X field values leave cmd's mdix fields untouched.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	/* NOTE(review): a negative phy_read error from the helper would
	 * be masked below — presumably acceptable, but worth confirming
	 */
	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1356
/* ethtool set_settings: apply MDI-X control if it changed, then hand
 * speed/duplex/autoneg to phylib.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		/* NOTE(review): briefly enabling BMCR loopback drops the
		 * link so the partner re-syncs to the forced mode —
		 * confirm this is the intended mechanism
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1387
/* ethtool handlers: link management is delegated to phylib, while the
 * EEPROM, statistics, Wake-on-LAN and EEE handlers access chip
 * registers directly.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1407
1408 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1409 {
1410 if (!netif_running(netdev))
1411 return -EINVAL;
1412
1413 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1414 }
1415
1416 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1417 {
1418 u32 addr_lo, addr_hi;
1419 int ret;
1420 u8 addr[6];
1421
1422 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1423 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1424
1425 addr[0] = addr_lo & 0xFF;
1426 addr[1] = (addr_lo >> 8) & 0xFF;
1427 addr[2] = (addr_lo >> 16) & 0xFF;
1428 addr[3] = (addr_lo >> 24) & 0xFF;
1429 addr[4] = addr_hi & 0xFF;
1430 addr[5] = (addr_hi >> 8) & 0xFF;
1431
1432 if (!is_valid_ether_addr(addr)) {
1433 /* reading mac address from EEPROM or OTP */
1434 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1435 addr) == 0) ||
1436 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1437 addr) == 0)) {
1438 if (is_valid_ether_addr(addr)) {
1439 /* eeprom values are valid so use them */
1440 netif_dbg(dev, ifup, dev->net,
1441 "MAC address read from EEPROM");
1442 } else {
1443 /* generate random MAC */
1444 random_ether_addr(addr);
1445 netif_dbg(dev, ifup, dev->net,
1446 "MAC address set to random addr");
1447 }
1448
1449 addr_lo = addr[0] | (addr[1] << 8) |
1450 (addr[2] << 16) | (addr[3] << 24);
1451 addr_hi = addr[4] | (addr[5] << 8);
1452
1453 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1454 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1455 } else {
1456 /* generate random MAC */
1457 random_ether_addr(addr);
1458 netif_dbg(dev, ifup, dev->net,
1459 "MAC address set to random addr");
1460 }
1461 }
1462
1463 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1464 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1465
1466 ether_addr_copy(dev->net->dev_addr, addr);
1467 }
1468
/* MDIO read and write wrappers for phylib */

/* Read one PHY register through the chip's MII_ACC/MII_DATA interface.
 * Serialized against other MII users by phy_mutex; waits for the MII
 * busy flag to clear before and after issuing the read.
 * Returns the 16-bit register value or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1504
/* Write one PHY register through the chip's MII_ACC/MII_DATA interface.
 * Serialized by phy_mutex; waits for the MII busy flag to clear before
 * and after the access.
 * NOTE(review): once the interface is resumed this always returns 0,
 * even when a busy-wait fails — confirm that is intended.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
1539
/* Allocate and register the driver-private MDIO bus used to reach the
 * (usually internal) PHY.  Returns 0 on success or a negative errno.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* bus id must be unique per device: derive it from USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
	case 0x78000000:
	case 0x78500000:
		/* set to internal PHY id */
		/* mask so only address 1 is probed */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1578
1579 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1580 {
1581 mdiobus_unregister(dev->mdiobus);
1582 mdiobus_free(dev->mdiobus);
1583 }
1584
/* phylib link-change callback.  Link handling is driven from the
 * chip's interrupt endpoint and the deferred EVENT_LINK_RESET work
 * instead, so this is intentionally empty.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1589
1590 static int lan78xx_phy_init(struct lan78xx_net *dev)
1591 {
1592 int ret;
1593 struct phy_device *phydev = dev->net->phydev;
1594
1595 phydev = phy_find_first(dev->mdiobus);
1596 if (!phydev) {
1597 netdev_err(dev->net, "no PHY found\n");
1598 return -EIO;
1599 }
1600
1601 /* Enable PHY interrupts.
1602 * We handle our own interrupt
1603 */
1604 ret = phy_read(phydev, LAN88XX_INT_STS);
1605 ret = phy_write(phydev, LAN88XX_INT_MASK,
1606 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1607 LAN88XX_INT_MASK_LINK_CHANGE_);
1608
1609 phydev->irq = PHY_IGNORE_INTERRUPT;
1610
1611 ret = phy_connect_direct(dev->net, phydev,
1612 lan78xx_link_status_change,
1613 PHY_INTERFACE_MODE_GMII);
1614 if (ret) {
1615 netdev_err(dev->net, "can't attach PHY to %s\n",
1616 dev->mdiobus->id);
1617 return -EIO;
1618 }
1619
1620 /* set to AUTOMDIX */
1621 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1622
1623 /* MAC doesn't support 1000T Half */
1624 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1625 phydev->supported |= (SUPPORTED_10baseT_Half |
1626 SUPPORTED_10baseT_Full |
1627 SUPPORTED_100baseT_Half |
1628 SUPPORTED_100baseT_Full |
1629 SUPPORTED_1000baseT_Full |
1630 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1631 genphy_config_aneg(phydev);
1632
1633 phy_start(phydev);
1634
1635 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1636
1637 return 0;
1638 }
1639
/* Program the MAC's maximum accepted RX frame size (size + 4 for FCS).
 * If the receiver is running it is disabled around the update —
 * presumably the max-size field must not change while RX is enabled.
 * NOTE(review): always returns 0; register access errors are ignored.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
1668
/* Unlink every not-yet-unlinked URB on queue q.
 * The queue lock must be dropped around usb_unlink_urb() (completion
 * handlers take the same lock), so each pass rescans the queue and
 * skips entries already marked unlink_start.
 * Returns the number of URBs successfully scheduled for unlinking.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1713
/* ndo_change_mtu: update the MAC's max frame length, then grow the RX
 * URB buffer size if it was tracking the old hard_mtu, recycling
 * in-flight RX URBs so new ones are allocated at the larger size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* rx_urb_size only tracks hard_mtu when they were equal before;
	 * otherwise a larger (burst-cap) size is already in use
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1748
/* ndo_set_mac_address: program a new unicast address into the RX
 * address registers.  Refused while the interface is running.
 * NOTE(review): perfect-filter slot 0 (MAF_LO/HI) is not updated here,
 * unlike lan78xx_init_mac_address() — confirm whether that is intended.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the 6 bytes little-endian into the two registers */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
1776
/* Enable or disable Rx checksum offload engine */
/* ndo_set_features: update the cached RFE_CTL copy under the spinlock
 * (it is shared with the multicast path), then write it to the chip
 * after dropping the lock — presumably because the USB register write
 * can sleep.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
1807
1808 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1809 {
1810 struct lan78xx_priv *pdata =
1811 container_of(param, struct lan78xx_priv, set_vlan);
1812 struct lan78xx_net *dev = pdata->dev;
1813
1814 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1815 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1816 }
1817
1818 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1819 __be16 proto, u16 vid)
1820 {
1821 struct lan78xx_net *dev = netdev_priv(netdev);
1822 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1823 u16 vid_bit_index;
1824 u16 vid_dword_index;
1825
1826 vid_dword_index = (vid >> 5) & 0x7F;
1827 vid_bit_index = vid & 0x1F;
1828
1829 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1830
1831 /* defer register writes to a sleepable context */
1832 schedule_work(&pdata->set_vlan);
1833
1834 return 0;
1835 }
1836
1837 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1838 __be16 proto, u16 vid)
1839 {
1840 struct lan78xx_net *dev = netdev_priv(netdev);
1841 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1842 u16 vid_bit_index;
1843 u16 vid_dword_index;
1844
1845 vid_dword_index = (vid >> 5) & 0x7F;
1846 vid_bit_index = vid & 0x1F;
1847
1848 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1849
1850 /* defer register writes to a sleepable context */
1851 schedule_work(&pdata->set_vlan);
1852
1853 return 0;
1854 }
1855
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 * When LTM is enabled in USB_CFG1, a 24-byte parameter blob is located
 * through a 2-byte header at EEPROM/OTP offset 0x3F (header[0] must be
 * 24 = the blob length, header[1]*2 is its offset).  If no valid blob
 * is found the registers are zeroed; a failed raw read aborts without
 * touching the registers at all.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1894
/* Full chip (lite) reset and register bring-up: MAC address, USB/burst
 * configuration, FIFO sizes, receive filtering, PHY reset and finally
 * TX/RX enable.  Called from open and bind.
 * Returns 0, or -EIO if a reset/ready poll times out.
 * NOTE(review): most intermediate register return codes are ignored.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a lite reset and poll (up to 1s) for the bit to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size the burst cap and URB queue lengths by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		/* NOTE(review): tx_qlen is not set on the full-speed path —
		 * confirm this asymmetry is intentional
		 */
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll (up to 1s) for reset-complete + device ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable MAC TX, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC RX, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2026
/* ndo_open: reset the chip, initialise/attach the PHY, submit the
 * interrupt URB used for link-change notification, then start the TX
 * queue and schedule a deferred link reset.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* actual link handling happens in the deferred kevent worker */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2067
/* Unlink all TX/RX URBs and wait (uninterruptibly, in short timeouts)
 * for in-flight completions.  dev->wait is published so completion
 * paths can wake this waiter.
 * NOTE(review): the wait condition uses && across rxq/txq/done, so the
 * loop exits as soon as ANY one of the queues is empty — confirm
 * whether || (wait until all are drained) was intended.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2093
/* ndo_stop: detach and release the PHY, quiesce all URBs and deferred
 * work, then drop an autopm reference.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* NOTE(review): no matching get in this function — presumably
	 * balances a reference taken elsewhere (open/resume); verify the
	 * autopm accounting, since lan78xx_open() already puts its own.
	 */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2128
/* Collapse any skb fragments so the TX path sees one linear buffer */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc;

	rc = skb_linearize(skb);

	return rc;
}
2133
/* Prepend the 8-byte TX command header (tx_cmd_a then tx_cmd_b, little
 * endian) that the chip expects in front of each frame, ensuring the
 * skb is linear and has enough headroom first.  Consumes the skb and
 * returns it (possibly a reallocated copy), or NULL on failure.
 * NOTE(review): if linearization fails the skb is neither freed nor
 * returned — possible leak; confirm against the caller's accounting.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* need 8 bytes of headroom for the command words; copy if short */
	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	if (lan78xx_linearize(skb) < 0)
		return NULL;

	/* frame length + ask hardware to append the FCS */
	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push tx_cmd_b first so tx_cmd_a ends up in front */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2181
/* Move skb from its active queue (rxq/txq) to dev->done and kick the
 * BH tasklet when the done list transitions from empty.  The source
 * and done locks are taken back-to-back inside one irqsave region: the
 * first lock disables irqs and the final unlock restores them.
 * Returns the skb's previous state so callers can detect a race with
 * unlink_start.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first enqueue needs to schedule; later ones piggyback */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2204
/* Completion handler for bulk-out TX URBs (interrupt context).
 * Updates statistics, triggers recovery for notable USB errors, then
 * hands the skb to the BH path via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: defer a halt-clear to keventd */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level bus errors: stop feeding the device */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2243
2244 static void lan78xx_queue_skb(struct sk_buff_head *list,
2245 struct sk_buff *newsk, enum skb_state state)
2246 {
2247 struct skb_data *entry = (struct skb_data *)newsk->cb;
2248
2249 __skb_queue_tail(list, newsk);
2250 entry->state = state;
2251 }
2252
/* ndo_start_xmit: prepend the TX command header and queue the skb for
 * the BH tasklet to submit.  Always returns NETDEV_TX_OK; on prep
 * failure the packet is accounted as an error/drop instead.
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		/* consumes skb; returns NULL on failure */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2281
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints and record the resulting pipes.
 * Returns 0 on success, -EINVAL if no altsetting provides both bulk
 * endpoints.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN-direction interrupt EPs qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* first match of each kind wins */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2339
2340 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2341 {
2342 struct lan78xx_priv *pdata = NULL;
2343 int ret;
2344 int i;
2345
2346 ret = lan78xx_get_endpoints(dev, intf);
2347
2348 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2349
2350 pdata = (struct lan78xx_priv *)(dev->data[0]);
2351 if (!pdata) {
2352 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2353 return -ENOMEM;
2354 }
2355
2356 pdata->dev = dev;
2357
2358 spin_lock_init(&pdata->rfe_ctl_lock);
2359 mutex_init(&pdata->dataport_mutex);
2360
2361 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2362
2363 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2364 pdata->vlan_table[i] = 0;
2365
2366 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2367
2368 dev->net->features = 0;
2369
2370 if (DEFAULT_TX_CSUM_ENABLE)
2371 dev->net->features |= NETIF_F_HW_CSUM;
2372
2373 if (DEFAULT_RX_CSUM_ENABLE)
2374 dev->net->features |= NETIF_F_RXCSUM;
2375
2376 if (DEFAULT_TSO_CSUM_ENABLE)
2377 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2378
2379 dev->net->hw_features = dev->net->features;
2380
2381 /* Init all registers */
2382 ret = lan78xx_reset(dev);
2383
2384 lan78xx_mdio_init(dev);
2385
2386 dev->net->flags |= IFF_MULTICAST;
2387
2388 pdata->wol = WAKE_MAGIC;
2389
2390 return 0;
2391 }
2392
2393 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2394 {
2395 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2396
2397 lan78xx_remove_mdio(dev);
2398
2399 if (pdata) {
2400 netif_dbg(dev, ifdown, dev->net, "free pdata");
2401 kfree(pdata);
2402 pdata = NULL;
2403 dev->data[0] = 0;
2404 }
2405 }
2406
2407 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2408 struct sk_buff *skb,
2409 u32 rx_cmd_a, u32 rx_cmd_b)
2410 {
2411 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2412 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2413 skb->ip_summed = CHECKSUM_NONE;
2414 } else {
2415 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2416 skb->ip_summed = CHECKSUM_COMPLETE;
2417 }
2418 }
2419
2420 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2421 {
2422 int status;
2423
2424 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2425 skb_queue_tail(&dev->rxq_pause, skb);
2426 return;
2427 }
2428
2429 skb->protocol = eth_type_trans(skb, dev->net);
2430 dev->net->stats.rx_packets++;
2431 dev->net->stats.rx_bytes += skb->len;
2432
2433 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2434 skb->len + sizeof(struct ethhdr), skb->protocol);
2435 memset(skb->cb, 0, sizeof(struct skb_data));
2436
2437 if (skb_defer_rx_timestamp(skb))
2438 return;
2439
2440 status = netif_rx(skb);
2441 if (status != NET_RX_SUCCESS)
2442 netif_dbg(dev, rx_err, dev->net,
2443 "netif_rx status %d\n", status);
2444 }
2445
/* Parse one bulk-in buffer that may contain several Ethernet frames.
 *
 * Each frame is preceded by three little-endian command words:
 * rx_cmd_a (status + length), rx_cmd_b (checksum) and rx_cmd_c.  Frames
 * are padded so the next header starts on a 4-byte boundary.  All frames
 * but the last are handed up as clones; the final frame reuses @skb
 * itself (the caller, rx_process, delivers it when skb->len != 0).
 *
 * Returns 1 on success, 0 on a fatal parse/allocation error (the caller
 * counts that as an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* too short to hold even one header */
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three little-endian command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error flagged by hardware: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				/* caller delivers this skb to the stack */
				return 1;
			}

			/* more frames follow: hand up a clone of this one */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's payload */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2517
2518 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2519 {
2520 if (!lan78xx_rx(dev, skb)) {
2521 dev->net->stats.rx_errors++;
2522 goto done;
2523 }
2524
2525 if (skb->len) {
2526 lan78xx_skb_return(dev, skb);
2527 return;
2528 }
2529
2530 netif_dbg(dev, rx_err, dev->net, "drop\n");
2531 dev->net->stats.rx_errors++;
2532 done:
2533 skb_queue_tail(&dev->done, skb);
2534 }
2535
2536 static void rx_complete(struct urb *urb);
2537
/* Allocate an RX skb, attach it to @urb and submit the bulk-in transfer.
 *
 * On any failure the skb and urb are freed here before returning.
 * Returns 0 on success, -ENOLINK when the device is stopped or
 * unreachable (caller should stop refilling), or another negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* per-skb bookkeeping lives in the skb control buffer */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* submit only while the interface is up, present, not halted and
	 * not autosuspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* retry from the bottom half */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2597
/* Bulk-in completion handler (interrupt context): classify the URB
 * status, queue the skb for bottom-half processing via defer_bh(), and
 * resubmit the URB for the next transfer when the status allows it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count as length error and discard */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET: /* async unlink */
	case -ESHUTDOWN: /* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		/* keep the urb with the skb for BH cleanup; clearing the
		 * local pointer prevents the resubmit below
		 */
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level USB errors: clean up, no resubmit */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit the urb unless the interface stopped or an unlink is
	 * already in progress
	 */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2667
/* Bottom-half TX worker: coalesce pending skbs into one bulk-out URB.
 *
 * Non-GSO frames are copied back-to-back (each starting on a 4-byte
 * boundary) into a single freshly allocated skb; a GSO frame is always
 * sent on its own without copying.  The resulting URB is submitted,
 * deferred until resume (device asleep), or dropped on error.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many pending skbs fit into one transfer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb goes out alone; note skb == skb2 after
			 * the dequeue, so the gso_skb code below operates
			 * on the dequeued skb
			 */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each packet starts on a 4-byte boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* apply backpressure once the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		/* also reached via goto with skb NULL (alloc_skb failure)
		 * or urb NULL (usb_alloc_urb failure); both handled below
		 */
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2791
2792 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2793 {
2794 struct urb *urb;
2795 int i;
2796
2797 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2798 for (i = 0; i < 10; i++) {
2799 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2800 break;
2801 urb = usb_alloc_urb(0, GFP_ATOMIC);
2802 if (urb)
2803 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2804 return;
2805 }
2806
2807 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2808 tasklet_schedule(&dev->bh);
2809 }
2810 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2811 netif_wake_queue(dev->net);
2812 }
2813
/* Main driver tasklet: reap completed skbs from the done list, then kick
 * TX aggregation and RX refill while the interface is up and running.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			/* deliver to the stack or re-queue for cleanup */
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* NOTE(review): an unknown state aborts the whole
			 * pass and the dequeued skb is not freed --
			 * presumably a can't-happen case; verify.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* refill RX unless throttled or halted */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2850
/* Deferred (process-context) event handler: clears TX/RX endpoint stalls
 * and performs link resets requested from atomic context via
 * lan78xx_defer_kevent().
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels jump *into*
 * if-bodies so that a failed usb_autopm_get_interface() reuses the same
 * error-report statement.  Confusing but apparently deliberate; do not
 * "simplify" without care.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): ret is always 0 here, so the
			 * printed code never reflects the real error
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2917
2918 static void intr_complete(struct urb *urb)
2919 {
2920 struct lan78xx_net *dev = urb->context;
2921 int status = urb->status;
2922
2923 switch (status) {
2924 /* success */
2925 case 0:
2926 lan78xx_status(dev, urb);
2927 break;
2928
2929 /* software-driven interface shutdown */
2930 case -ENOENT: /* urb killed */
2931 case -ESHUTDOWN: /* hardware gone */
2932 netif_dbg(dev, ifdown, dev->net,
2933 "intr shutdown, code %d\n", status);
2934 return;
2935
2936 /* NOTE: not throttling like RX/TX, since this endpoint
2937 * already polls infrequently
2938 */
2939 default:
2940 netdev_dbg(dev->net, "intr status %d\n", status);
2941 break;
2942 }
2943
2944 if (!netif_running(dev->net))
2945 return;
2946
2947 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2948 status = usb_submit_urb(urb, GFP_ATOMIC);
2949 if (status != 0)
2950 netif_err(dev, timer, dev->net,
2951 "intr resubmit --> %d\n", status);
2952 }
2953
/* USB disconnect callback: tear down in the reverse order of probe.
 * The netdev is unregistered first so no new I/O can start, then
 * deferred work and anchored URBs are cancelled before unbinding and
 * releasing the interrupt URB and device reference.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs that were parked while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
2982
/* ndo_tx_timeout handler: unlink all in-flight TX URBs and let the
 * bottom-half tasklet restart transmission from the pending queue.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2990
/* net_device callbacks implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
3005
3006 static int lan78xx_probe(struct usb_interface *intf,
3007 const struct usb_device_id *id)
3008 {
3009 struct lan78xx_net *dev;
3010 struct net_device *netdev;
3011 struct usb_device *udev;
3012 int ret;
3013 unsigned maxp;
3014 unsigned period;
3015 u8 *buf = NULL;
3016
3017 udev = interface_to_usbdev(intf);
3018 udev = usb_get_dev(udev);
3019
3020 ret = -ENOMEM;
3021 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3022 if (!netdev) {
3023 dev_err(&intf->dev, "Error: OOM\n");
3024 goto out1;
3025 }
3026
3027 /* netdev_printk() needs this */
3028 SET_NETDEV_DEV(netdev, &intf->dev);
3029
3030 dev = netdev_priv(netdev);
3031 dev->udev = udev;
3032 dev->intf = intf;
3033 dev->net = netdev;
3034 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3035 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3036
3037 skb_queue_head_init(&dev->rxq);
3038 skb_queue_head_init(&dev->txq);
3039 skb_queue_head_init(&dev->done);
3040 skb_queue_head_init(&dev->rxq_pause);
3041 skb_queue_head_init(&dev->txq_pend);
3042 mutex_init(&dev->phy_mutex);
3043
3044 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3045 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3046 init_usb_anchor(&dev->deferred);
3047
3048 netdev->netdev_ops = &lan78xx_netdev_ops;
3049 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3050 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3051
3052 ret = lan78xx_bind(dev, intf);
3053 if (ret < 0)
3054 goto out2;
3055 strcpy(netdev->name, "eth%d");
3056
3057 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3058 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3059
3060 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3061 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3062 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3063
3064 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3065 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3066
3067 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3068 dev->ep_intr->desc.bEndpointAddress &
3069 USB_ENDPOINT_NUMBER_MASK);
3070 period = dev->ep_intr->desc.bInterval;
3071
3072 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3073 buf = kmalloc(maxp, GFP_KERNEL);
3074 if (buf) {
3075 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3076 if (!dev->urb_intr) {
3077 kfree(buf);
3078 goto out3;
3079 } else {
3080 usb_fill_int_urb(dev->urb_intr, dev->udev,
3081 dev->pipe_intr, buf, maxp,
3082 intr_complete, dev, period);
3083 }
3084 }
3085
3086 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3087
3088 /* driver requires remote-wakeup capability during autosuspend. */
3089 intf->needs_remote_wakeup = 1;
3090
3091 ret = register_netdev(netdev);
3092 if (ret != 0) {
3093 netif_err(dev, probe, netdev, "couldn't register the device\n");
3094 goto out2;
3095 }
3096
3097 usb_set_intfdata(intf, dev);
3098
3099 ret = device_set_wakeup_enable(&udev->dev, true);
3100
3101 /* Default delay of 2sec has more overhead than advantage.
3102 * Set to 10sec as default.
3103 */
3104 pm_runtime_set_autosuspend_delay(&udev->dev,
3105 DEFAULT_AUTOSUSPEND_DELAY);
3106
3107 return 0;
3108
3109 out3:
3110 lan78xx_unbind(dev, intf);
3111 out2:
3112 free_netdev(netdev);
3113 out1:
3114 usb_put_dev(udev);
3115
3116 return ret;
3117 }
3118
3119 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3120 {
3121 const u16 crc16poly = 0x8005;
3122 int i;
3123 u16 bit, crc, msb;
3124 u8 data;
3125
3126 crc = 0xFFFF;
3127 for (i = 0; i < len; i++) {
3128 data = *buf++;
3129 for (bit = 0; bit < 8; bit++) {
3130 msb = crc >> 15;
3131 crc <<= 1;
3132
3133 if (msb ^ (u16)(data & 1)) {
3134 crc ^= crc16poly;
3135 crc |= (u16)0x0001U;
3136 }
3137 data >>= 1;
3138 }
3139 }
3140
3141 return crc;
3142 }
3143
/* Program wake-on-LAN filters and power-management registers for system
 * suspend according to the WOL bitmap in @wol (WAKE_PHY / WAKE_MAGIC /
 * WAKE_BCAST / WAKE_MCAST / WAKE_UCAST / WAKE_ARP).  The MAC is stopped
 * while the filters are written, then the receiver is re-enabled so
 * wake frames can be detected.
 *
 * NOTE(review): register read/write results are assigned to @ret but
 * never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of multicast MAC prefixes and the ARP EtherType */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake filters */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wakeup configuration and status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wake-up frame filter, then re-enable as needed */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: compare the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: compare the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12-13: the EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames can be seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3286
/* USB suspend callback (system sleep and autosuspend).
 *
 * On the first suspend of a nesting sequence: refuse autosuspend while
 * TX work is pending, stop the MAC, and kill outstanding URBs.  Then arm
 * wakeup: "good frame" wakeup for autosuspend, or the user-configured
 * WOL set (pdata->wol) for system sleep.
 *
 * NOTE(review): register I/O results are ignored; apart from the -EBUSY
 * autosuspend refusal this always returns 0.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear stale wake configuration and status */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear pending wakeup status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake frames can be received */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: use the configured WOL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3380
/* USB resume callback: restart the interrupt URB, replay TX URBs that
 * were deferred while the device slept, clear wakeup status registers
 * and re-enable the transmitter.
 *
 * NOTE(review): the interrupt-URB resubmit and register I/O results are
 * ignored; the function always returns 0.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* replay TX URBs anchored on dev->deferred while asleep */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* balance the autopm ref taken in tx_bh */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake configuration, then acknowledge recorded wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3441
/* Resume after the device was reset (lost its state): re-initialize all
 * registers and the PHY, then run the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3452
/* USB vendor/product IDs of the devices handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3465
/* USB driver glue: probe/disconnect and power-management entry points */
static struct usb_driver lan78xx_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = lan78xx_probe,
	.disconnect = lan78xx_disconnect,
	.suspend = lan78xx_suspend,
	.resume = lan78xx_resume,
	.reset_resume = lan78xx_reset_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");