drivers/net: Remove alloc_etherdev error messages
1da177e4
LT
1/*
2 * This code is derived from the VIA reference driver (copyright message
3 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
4 * addition to the Linux kernel.
5 *
6 * The code has been merged into one source file, cleaned up to follow
7 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
8 * for 64bit hardware platforms.
9 *
10 * TODO
1da177e4 11 * rx_copybreak/alignment
1da177e4
LT
12 * More testing
13 *
113aa838 14 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
1da177e4
LT
15 * Additional fixes and clean up: Francois Romieu
16 *
17 * This source has not been verified for use in safety critical systems.
18 *
19 * Please direct queries about the revamped driver to the linux-kernel
20 * list not VIA.
21 *
22 * Original code:
23 *
24 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
25 * All rights reserved.
26 *
27 * This software may be redistributed and/or modified under
28 * the terms of the GNU General Public License as published by the Free
29 * Software Foundation; either version 2 of the License, or
30 * any later version.
31 *
32 * This program is distributed in the hope that it will be useful, but
33 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
34 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
35 * for more details.
36 *
37 * Author: Chuang Liang-Shing, AJ Jiang
38 *
39 * Date: Jan 24, 2003
40 *
41 * MODULE_LICENSE("GPL");
42 *
43 */
44
1da177e4
LT
45#include <linux/module.h>
46#include <linux/types.h>
73b54688 47#include <linux/bitops.h>
1da177e4
LT
48#include <linux/init.h>
49#include <linux/mm.h>
50#include <linux/errno.h>
51#include <linux/ioport.h>
52#include <linux/pci.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/delay.h>
58#include <linux/timer.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
1da177e4
LT
61#include <linux/string.h>
62#include <linux/wait.h>
c4067400 63#include <linux/io.h>
1da177e4 64#include <linux/if.h>
c4067400 65#include <linux/uaccess.h>
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/inetdevice.h>
68#include <linux/reboot.h>
69#include <linux/ethtool.h>
70#include <linux/mii.h>
71#include <linux/in.h>
72#include <linux/if_arp.h>
501e4d24 73#include <linux/if_vlan.h>
1da177e4
LT
74#include <linux/ip.h>
75#include <linux/tcp.h>
76#include <linux/udp.h>
77#include <linux/crc-ccitt.h>
78#include <linux/crc32.h>
79
80#include "via-velocity.h"
81
82
c4067400 83static int velocity_nics;
1da177e4
LT
84static int msglevel = MSG_LEVEL_INFO;
85
01faccbf
SH
86/**
87 * mac_get_cam_mask - Read a CAM mask
88 * @regs: register block for this velocity
89 * @mask: buffer to store mask
90 *
91 * Fetch the mask bits of the selected CAM and store them into the
92 * provided mask buffer.
93 */
c4067400 94static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
01faccbf
SH
95{
96 int i;
97
98 /* Select CAM mask */
99 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
100
101 writeb(0, &regs->CAMADDR);
102
103 /* read mask */
104 for (i = 0; i < 8; i++)
105 *mask++ = readb(&(regs->MARCAM[i]));
106
107 /* disable CAMEN */
108 writeb(0, &regs->CAMADDR);
109
110 /* Select mar */
111 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
01faccbf
SH
112}
113
01faccbf
SH
114/**
115 * mac_set_cam_mask - Set a CAM mask
116 * @regs: register block for this velocity
117 * @mask: CAM mask to load
118 *
119 * Store a new mask into a CAM
120 */
c4067400 121static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
01faccbf
SH
122{
123 int i;
124 /* Select CAM mask */
125 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
126
127 writeb(CAMADDR_CAMEN, &regs->CAMADDR);
128
c4067400 129 for (i = 0; i < 8; i++)
01faccbf 130 writeb(*mask++, &(regs->MARCAM[i]));
c4067400 131
01faccbf
SH
132 /* disable CAMEN */
133 writeb(0, &regs->CAMADDR);
134
135 /* Select mar */
136 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
137}
138
c4067400 139static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
01faccbf
SH
140{
141 int i;
142 /* Select CAM mask */
143 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
144
145 writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
146
c4067400 147 for (i = 0; i < 8; i++)
01faccbf 148 writeb(*mask++, &(regs->MARCAM[i]));
c4067400 149
01faccbf
SH
150 /* disable CAMEN */
151 writeb(0, &regs->CAMADDR);
152
153 /* Select mar */
154 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
155}
156
157/**
158 * mac_set_cam - set CAM data
159 * @regs: register block of this velocity
160 * @idx: Cam index
161 * @addr: 2 or 6 bytes of CAM data
162 *
163 * Load an address or vlan tag into a CAM
164 */
c4067400 165static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
01faccbf
SH
166{
167 int i;
168
169 /* Select CAM mask */
170 BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
171
172 idx &= (64 - 1);
173
174 writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
175
c4067400 176 for (i = 0; i < 6; i++)
01faccbf 177 writeb(*addr++, &(regs->MARCAM[i]));
c4067400 178
01faccbf
SH
179 BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
180
181 udelay(10);
182
183 writeb(0, &regs->CAMADDR);
184
185 /* Select mar */
186 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
187}
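/*
 * Illustrative use of the CAM helpers above, mirroring what
 * velocity_set_multi() does further down: load a multicast address
 * into CAM slot i and then enable that slot in the mask.
 *
 *	mac_set_cam(regs, i, ha->addr);
 *	vptr->mCAMmask[i / 8] |= 1 << (i & 7);
 *	mac_set_cam_mask(regs, vptr->mCAMmask);
 */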
188
c4067400 189static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
01faccbf
SH
190 const u8 *addr)
191{
192
193 /* Select CAM mask */
194 BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
195
196 idx &= (64 - 1);
197
198 writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
199 writew(*((u16 *) addr), &regs->MARCAM[0]);
200
201 BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
202
203 udelay(10);
204
205 writeb(0, &regs->CAMADDR);
206
207 /* Select mar */
208 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
209}
210
211
212/**
213 * mac_wol_reset - reset WOL after exiting low power
214 * @regs: register block of this velocity
215 *
216 * Called after we drop out of wake on lan mode in order to
217 * reset the Wake on lan features. This function doesn't restore
 218 * the rest of the logic after a sleep/wakeup cycle
219 */
c4067400 220static void mac_wol_reset(struct mac_regs __iomem *regs)
01faccbf
SH
221{
222
223 /* Turn off SWPTAG right after leaving power mode */
224 BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
225 /* clear sticky bits */
226 BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
227
228 BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
229 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
230 /* disable force PME-enable */
231 writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
232 /* disable power-event config bit */
233 writew(0xFFFF, &regs->WOLCRClr);
234 /* clear power status */
235 writew(0xFFFF, &regs->WOLSRClr);
236}
1da177e4 237
7282d491 238static const struct ethtool_ops velocity_ethtool_ops;
1da177e4
LT
239
240/*
241 Define module options
242*/
243
244MODULE_AUTHOR("VIA Networking Technologies, Inc.");
245MODULE_LICENSE("GPL");
246MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
247
c4067400
DJ
248#define VELOCITY_PARAM(N, D) \
249 static int N[MAX_UNITS] = OPTION_DEFAULT;\
1da177e4 250 module_param_array(N, int, NULL, 0); \
c4067400 251 MODULE_PARM_DESC(N, D);
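/*
 * For reference, VELOCITY_PARAM(rx_thresh, "Receive fifo threshold")
 * expands to:
 *
 *	static int rx_thresh[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(rx_thresh, int, NULL, 0);
 *	MODULE_PARM_DESC(rx_thresh, "Receive fifo threshold");
 *
 * so every option below can be given once per adapter, e.g.
 * "modprobe via-velocity rx_thresh=3,0" (illustrative values).
 */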
1da177e4
LT
252
253#define RX_DESC_MIN 64
254#define RX_DESC_MAX 255
255#define RX_DESC_DEF 64
256VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
257
258#define TX_DESC_MIN 16
259#define TX_DESC_MAX 256
260#define TX_DESC_DEF 64
261VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
262
1da177e4
LT
263#define RX_THRESH_MIN 0
264#define RX_THRESH_MAX 3
265#define RX_THRESH_DEF 0
266/* rx_thresh[] is used for controlling the receive fifo threshold.
 267 0: indicates the rxfifo threshold is 128 bytes.
 268 1: indicates the rxfifo threshold is 512 bytes.
 269 2: indicates the rxfifo threshold is 1024 bytes.
 270 3: indicates the rxfifo threshold is store & forward.
271*/
272VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
273
274#define DMA_LENGTH_MIN 0
275#define DMA_LENGTH_MAX 7
2a5774f7 276#define DMA_LENGTH_DEF 6
1da177e4
LT
277
278/* DMA_length[] is used for controlling the DMA length
279 0: 8 DWORDs
280 1: 16 DWORDs
281 2: 32 DWORDs
282 3: 64 DWORDs
283 4: 128 DWORDs
284 5: 256 DWORDs
 285 6: SF (flush till empty)
 286 7: SF (flush till empty)
287*/
288VELOCITY_PARAM(DMA_length, "DMA length");
289
1da177e4
LT
290#define IP_ALIG_DEF 0
 291/* IP_byte_align[] is used for IP header DWORD byte alignment
 292 0: indicates the IP header won't be DWORD byte aligned. (Default)
 293 1: indicates the IP header will be DWORD byte aligned.
25985edc 294 In some environments, the IP header should be DWORD byte aligned,
1da177e4
LT
 295 or the packet will be dropped when we receive it. (e.g. IPVS)
296*/
297VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
298
1da177e4
LT
299#define FLOW_CNTL_DEF 1
300#define FLOW_CNTL_MIN 1
301#define FLOW_CNTL_MAX 5
302
303/* flow_control[] is used for setting the flow control ability of NIC.
 304 1: hardware default - AUTO (default). Use hardware default value in ANAR.
305 2: enable TX flow control.
306 3: enable RX flow control.
307 4: enable RX/TX flow control.
308 5: disable
309*/
310VELOCITY_PARAM(flow_control, "Enable flow control ability");
311
312#define MED_LNK_DEF 0
313#define MED_LNK_MIN 0
15419227 314#define MED_LNK_MAX 5
1da177e4
LT
315/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
 316 0: indicates autonegotiation for both speed and duplex mode
 317 1: indicates 100Mbps half duplex mode
 318 2: indicates 100Mbps full duplex mode
 319 3: indicates 10Mbps half duplex mode
 320 4: indicates 10Mbps full duplex mode
15419227 321 5: indicates 1000Mbps full duplex mode
1da177e4
LT
322
323 Note:
c4067400
DJ
 324 if the EEPROM has been set to a forced mode, this option is ignored
 325 by the driver.
1da177e4
LT
326*/
327VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
328
329#define VAL_PKT_LEN_DEF 0
 330/* ValPktLen[] is used for setting how the NIC handles frames with an invalid layer 2 length.
331 0: Receive frame with invalid layer 2 length (Default)
332 1: Drop frame with invalid layer 2 length
333*/
334VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
335
336#define WOL_OPT_DEF 0
337#define WOL_OPT_MIN 0
338#define WOL_OPT_MAX 7
339/* wol_opts[] is used for controlling wake on lan behavior.
 340 0: Wake up if received a magic packet. (Default)
 341 1: Wake up if link status is on/off.
 342 2: Wake up if received an arp packet.
 343 4: Wake up if received any unicast packet.
 344 These values can be summed up to support more than one option.
345*/
346VELOCITY_PARAM(wol_opts, "Wake On Lan options");
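/*
 * Example (illustrative): wol_opts=6 is 2 + 4, i.e. wake on an arp
 * packet or on any unicast packet.
 */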
347
1da177e4
LT
348static int rx_copybreak = 200;
349module_param(rx_copybreak, int, 0644);
350MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
351
1da177e4
LT
352/*
353 * Internal board variants. At the moment we have only one
354 */
4f14b92f 355static struct velocity_info_tbl chip_info_table[] = {
cabb7667
JG
356 {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
357 { }
1da177e4
LT
358};
359
360/*
361 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading.
363 */
a3aa1884 364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
e54f4893
JG
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { }
1da177e4
LT
367};
368
369MODULE_DEVICE_TABLE(pci, velocity_id_table);
370
371/**
372 * get_chip_name - identifier to name
 373 * @chip_id: chip identifier
 374 *
 375 * Given a chip identifier return a suitable description. Returns
 376 * a pointer to a static string valid while the driver is loaded.
377 */
01faccbf 378static const char __devinit *get_chip_name(enum chip_type chip_id)
1da177e4
LT
379{
380 int i;
381 for (i = 0; chip_info_table[i].name != NULL; i++)
382 if (chip_info_table[i].chip_id == chip_id)
383 break;
384 return chip_info_table[i].name;
385}
386
387/**
388 * velocity_remove1 - device unplug
389 * @pdev: PCI device being removed
390 *
391 * Device unload callback. Called on an unplug or on module
392 * unload for each active device that is present. Disconnects
393 * the device from the network layer and frees all the resources
394 */
1da177e4
LT
395static void __devexit velocity_remove1(struct pci_dev *pdev)
396{
397 struct net_device *dev = pci_get_drvdata(pdev);
8ab6f3f7 398 struct velocity_info *vptr = netdev_priv(dev);
1da177e4 399
1da177e4
LT
400 unregister_netdev(dev);
401 iounmap(vptr->mac_regs);
402 pci_release_regions(pdev);
403 pci_disable_device(pdev);
404 pci_set_drvdata(pdev, NULL);
405 free_netdev(dev);
406
407 velocity_nics--;
408}
409
410/**
411 * velocity_set_int_opt - parser for integer options
412 * @opt: pointer to option value
413 * @val: value the user requested (or -1 for default)
414 * @min: lowest value allowed
415 * @max: highest value allowed
416 * @def: default value
417 * @name: property name
 418 * @devname: device name
419 *
420 * Set an integer property in the module options. This function does
421 * all the verification and checking as well as reporting so that
422 * we don't duplicate code for each option.
423 */
07b5f6a6 424static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
1da177e4
LT
425{
426 if (val == -1)
427 *opt = def;
428 else if (val < min || val > max) {
429 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
430 devname, name, min, max);
431 *opt = def;
432 } else {
433 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
434 devname, name, val);
435 *opt = val;
436 }
437}
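/*
 * Behaviour sketch (illustrative values): with RX_THRESH_MIN 0,
 * RX_THRESH_MAX 3 and RX_THRESH_DEF 0, a request of -1 silently keeps
 * the default, 2 is accepted and logged, and 9 is reported as invalid
 * and replaced by the default.
 */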
438
439/**
440 * velocity_set_bool_opt - parser for boolean options
441 * @opt: pointer to option value
442 * @val: value the user requested (or -1 for default)
443 * @def: default value (yes/no)
444 * @flag: numeric value to set for true.
445 * @name: property name
 446 * @devname: device name
447 *
448 * Set a boolean property in the module options. This function does
449 * all the verification and checking as well as reporting so that
450 * we don't duplicate code for each option.
451 */
c4067400 452static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
1da177e4
LT
453{
454 (*opt) &= (~flag);
455 if (val == -1)
456 *opt |= (def ? flag : 0);
457 else if (val < 0 || val > 1) {
6aa20a22 458 printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
1da177e4
LT
459 devname, name);
460 *opt |= (def ? flag : 0);
461 } else {
6aa20a22 462 printk(KERN_INFO "%s: set parameter %s to %s\n",
1da177e4
LT
463 devname, name, val ? "TRUE" : "FALSE");
464 *opt |= (val ? flag : 0);
465 }
466}
467
468/**
469 * velocity_get_options - set options on device
470 * @opts: option structure for the device
471 * @index: index of option to use in module options array
472 * @devname: device name
473 *
474 * Turn the module and command options into a single structure
475 * for the current device
476 */
07b5f6a6 477static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
1da177e4
LT
478{
479
480 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
481 velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
482 velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
483 velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
501e4d24 484
1da177e4
LT
485 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
489 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
1da177e4
LT
490 opts->numrx = (opts->numrx & ~3);
491}
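/*
 * Note: the "& ~3" above rounds the RX descriptor count down to a
 * multiple of four (e.g. a requested 66 becomes 64), matching the
 * hardware rule that RD counts are handed over in groups of 4X
 * (see velocity_give_many_rx_descs).
 */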
492
493/**
494 * velocity_init_cam_filter - initialise CAM
495 * @vptr: velocity to program
496 *
497 * Initialize the content addressable memory used for filters. Load
498 * appropriately according to the presence of VLAN
499 */
1da177e4
LT
500static void velocity_init_cam_filter(struct velocity_info *vptr)
501{
c4067400 502 struct mac_regs __iomem *regs = vptr->mac_regs;
73b54688 503 unsigned int vid, i = 0;
1da177e4
LT
504
505 /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
506 WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
507 WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
508
509 /* Disable all CAMs */
510 memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
511 memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
01faccbf
SH
512 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
513 mac_set_cam_mask(regs, vptr->mCAMmask);
1da177e4 514
d4f73c8e 515 /* Enable VCAMs */
73b54688
JP
516 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
517 mac_set_vlan_cam(regs, i, (u8 *) &vid);
518 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
519 if (++i >= VCAM_SIZE)
520 break;
521 }
522 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
d4f73c8e
FR
523}
524
8e586137 525static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
501e4d24
SH
526{
527 struct velocity_info *vptr = netdev_priv(dev);
528
c4067400 529 spin_lock_irq(&vptr->lock);
73b54688 530 set_bit(vid, vptr->active_vlans);
501e4d24 531 velocity_init_cam_filter(vptr);
c4067400 532 spin_unlock_irq(&vptr->lock);
8e586137 533 return 0;
501e4d24
SH
534}
535
8e586137 536static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
501e4d24
SH
537{
538 struct velocity_info *vptr = netdev_priv(dev);
539
c4067400 540 spin_lock_irq(&vptr->lock);
73b54688 541 clear_bit(vid, vptr->active_vlans);
501e4d24 542 velocity_init_cam_filter(vptr);
c4067400 543 spin_unlock_irq(&vptr->lock);
8e586137 544 return 0;
501e4d24
SH
545}
546
3c4dc711
FR
547static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
548{
549 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
550}
501e4d24 551
1da177e4
LT
552/**
553 * velocity_rx_reset - handle a receive reset
554 * @vptr: velocity we are resetting
555 *
556 * Reset the ownership and status for the receive ring side.
557 * Hand all the receive queue to the NIC.
558 */
1da177e4
LT
559static void velocity_rx_reset(struct velocity_info *vptr)
560{
561
c4067400 562 struct mac_regs __iomem *regs = vptr->mac_regs;
1da177e4
LT
563 int i;
564
3c4dc711 565 velocity_init_rx_ring_indexes(vptr);
1da177e4
LT
566
567 /*
568 * Init state, all RD entries belong to the NIC
569 */
570 for (i = 0; i < vptr->options.numrx; ++i)
0fe9f15e 571 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
1da177e4
LT
572
573 writew(vptr->options.numrx, &regs->RBRDU);
0fe9f15e 574 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1da177e4
LT
575 writew(0, &regs->RDIdx);
576 writew(vptr->options.numrx - 1, &regs->RDCSize);
577}
578
579/**
2cf71d2e
DJ
580 * velocity_get_opt_media_mode - get media selection
581 * @vptr: velocity adapter
1da177e4 582 *
2cf71d2e
DJ
583 * Get the media mode stored in EEPROM or module options and load
584 * mii_status accordingly. The requested link state information
585 * is also returned.
1da177e4 586 */
2cf71d2e 587static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
1da177e4 588{
2cf71d2e 589 u32 status = 0;
1da177e4 590
2cf71d2e
DJ
591 switch (vptr->options.spd_dpx) {
592 case SPD_DPX_AUTO:
593 status = VELOCITY_AUTONEG_ENABLE;
1da177e4 594 break;
2cf71d2e
DJ
595 case SPD_DPX_100_FULL:
596 status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
597 break;
598 case SPD_DPX_10_FULL:
599 status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
600 break;
601 case SPD_DPX_100_HALF:
602 status = VELOCITY_SPEED_100;
603 break;
604 case SPD_DPX_10_HALF:
605 status = VELOCITY_SPEED_10;
606 break;
15419227 607 case SPD_DPX_1000_FULL:
608 status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
609 break;
2cf71d2e
DJ
610 }
611 vptr->mii_status = status;
612 return status;
613}
1da177e4 614
2cf71d2e
DJ
615/**
616 * safe_disable_mii_autopoll - autopoll off
617 * @regs: velocity registers
618 *
619 * Turn off the autopoll and wait for it to disable on the chip
620 */
621static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
622{
623 u16 ww;
1da177e4 624
2cf71d2e
DJ
625 /* turn off MAUTO */
626 writeb(0, &regs->MIICR);
627 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
628 udelay(1);
629 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
630 break;
631 }
632}
1da177e4 633
2cf71d2e
DJ
634/**
635 * enable_mii_autopoll - turn on autopolling
636 * @regs: velocity registers
637 *
638 * Enable the MII link status autopoll feature on the Velocity
639 * hardware. Wait for it to enable.
640 */
641static void enable_mii_autopoll(struct mac_regs __iomem *regs)
642{
643 int ii;
1da177e4 644
2cf71d2e
DJ
645 writeb(0, &(regs->MIICR));
646 writeb(MIIADR_SWMPL, &regs->MIIADR);
1da177e4 647
2cf71d2e
DJ
648 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
649 udelay(1);
650 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
651 break;
652 }
1da177e4 653
2cf71d2e 654 writeb(MIICR_MAUTO, &regs->MIICR);
1da177e4 655
2cf71d2e
DJ
656 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
657 udelay(1);
658 if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
659 break;
660 }
1da177e4 661
2cf71d2e 662}
1da177e4 663
2cf71d2e
DJ
664/**
665 * velocity_mii_read - read MII data
666 * @regs: velocity registers
667 * @index: MII register index
668 * @data: buffer for received data
669 *
670 * Perform a single read of an MII 16bit register. Returns zero
671 * on success or -ETIMEDOUT if the PHY did not respond.
672 */
673static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
674{
675 u16 ww;
1da177e4 676
2cf71d2e
DJ
677 /*
678 * Disable MIICR_MAUTO, so that mii addr can be set normally
679 */
680 safe_disable_mii_autopoll(regs);
1da177e4 681
2cf71d2e 682 writeb(index, &regs->MIIADR);
1da177e4 683
2cf71d2e 684 BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
1da177e4 685
2cf71d2e
DJ
686 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
687 if (!(readb(&regs->MIICR) & MIICR_RCMD))
688 break;
689 }
1da177e4 690
2cf71d2e 691 *data = readw(&regs->MIIDATA);
1da177e4 692
2cf71d2e
DJ
693 enable_mii_autopoll(regs);
694 if (ww == W_MAX_TIMEOUT)
695 return -ETIMEDOUT;
696 return 0;
1da177e4
LT
697}
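/*
 * Illustrative use, as in mii_check_media_mode() below: read the
 * advertisement register and test a capability bit.
 *
 *	u16 anar;
 *
 *	if (velocity_mii_read(regs, MII_ADVERTISE, &anar) == 0 &&
 *	    (anar & ADVERTISE_100FULL))
 *		... 100Mbps full duplex is advertised ...
 */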
698
699/**
2cf71d2e
DJ
700 * mii_check_media_mode - check media state
701 * @regs: velocity registers
1da177e4 702 *
2cf71d2e
DJ
703 * Check the current MII status and determine the link status
704 * accordingly
1da177e4 705 */
2cf71d2e 706static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
1da177e4 707{
2cf71d2e
DJ
708 u32 status = 0;
709 u16 ANAR;
1da177e4 710
3a7f8681 711 if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
2cf71d2e 712 status |= VELOCITY_LINK_FAIL;
1da177e4 713
3a7f8681 714 if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
2cf71d2e 715 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
3a7f8681 716 else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
2cf71d2e
DJ
717 status |= (VELOCITY_SPEED_1000);
718 else {
3a7f8681
FR
719 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
720 if (ANAR & ADVERTISE_100FULL)
2cf71d2e 721 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
3a7f8681 722 else if (ANAR & ADVERTISE_100HALF)
2cf71d2e 723 status |= VELOCITY_SPEED_100;
3a7f8681 724 else if (ANAR & ADVERTISE_10FULL)
2cf71d2e
DJ
725 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
726 else
727 status |= (VELOCITY_SPEED_10);
1da177e4
LT
728 }
729
3a7f8681
FR
730 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
731 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
732 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
733 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
734 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
2cf71d2e
DJ
735 status |= VELOCITY_AUTONEG_ENABLE;
736 }
1da177e4 737 }
1da177e4 738
2cf71d2e
DJ
739 return status;
740}
39a11bd9 741
1da177e4 742/**
2cf71d2e
DJ
743 * velocity_mii_write - write MII data
744 * @regs: velocity registers
 745 * @mii_addr: MII register index
746 * @data: 16bit data for the MII register
1da177e4 747 *
2cf71d2e
DJ
748 * Perform a single write to an MII 16bit register. Returns zero
749 * on success or -ETIMEDOUT if the PHY did not respond.
1da177e4 750 */
2cf71d2e 751static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
1da177e4 752{
2cf71d2e 753 u16 ww;
1da177e4 754
2cf71d2e
DJ
755 /*
756 * Disable MIICR_MAUTO, so that mii addr can be set normally
e54f4893 757 */
2cf71d2e 758 safe_disable_mii_autopoll(regs);
1da177e4 759
2cf71d2e
DJ
760 /* MII reg offset */
761 writeb(mii_addr, &regs->MIIADR);
762 /* set MII data */
763 writew(data, &regs->MIIDATA);
764
765 /* turn on MIICR_WCMD */
766 BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
767
768 /* W_MAX_TIMEOUT is the timeout period */
769 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
770 udelay(5);
771 if (!(readb(&regs->MIICR) & MIICR_WCMD))
772 break;
1da177e4 773 }
2cf71d2e 774 enable_mii_autopoll(regs);
6aa20a22 775
2cf71d2e
DJ
776 if (ww == W_MAX_TIMEOUT)
777 return -ETIMEDOUT;
778 return 0;
779}
6aa20a22 780
2cf71d2e
DJ
781/**
782 * set_mii_flow_control - flow control setup
783 * @vptr: velocity interface
784 *
785 * Set up the flow control on this interface according to
786 * the supplied user/eeprom options.
787 */
788static void set_mii_flow_control(struct velocity_info *vptr)
789{
790 /*Enable or Disable PAUSE in ANAR */
791 switch (vptr->options.flow_cntl) {
792 case FLOW_CNTL_TX:
3a7f8681
FR
793 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
794 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
2cf71d2e 795 break;
1da177e4 796
2cf71d2e 797 case FLOW_CNTL_RX:
3a7f8681
FR
798 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
799 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
2cf71d2e 800 break;
1da177e4 801
2cf71d2e 802 case FLOW_CNTL_TX_RX:
3a7f8681 803 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
4a35ecf8 804 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
2cf71d2e 805 break;
1da177e4 806
2cf71d2e 807 case FLOW_CNTL_DISABLE:
3a7f8681
FR
808 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
809 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
2cf71d2e
DJ
810 break;
811 default:
812 break;
813 }
814}
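/*
 * Summary of the PAUSE advertisement set above (for reference):
 *
 *	flow_cntl	ADVERTISE_PAUSE_CAP	ADVERTISE_PAUSE_ASYM
 *	TX		off			on
 *	RX		on			on
 *	TX_RX		on			off
 *	DISABLE		off			off
 */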
1da177e4 815
2cf71d2e
DJ
816/**
817 * mii_set_auto_on - autonegotiate on
818 * @vptr: velocity
819 *
820 * Enable autonegotation on this interface
821 */
822static void mii_set_auto_on(struct velocity_info *vptr)
823{
3a7f8681
FR
824 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
825 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2cf71d2e 826 else
3a7f8681 827 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
2cf71d2e 828}
1da177e4 829
2cf71d2e
DJ
830static u32 check_connection_type(struct mac_regs __iomem *regs)
831{
832 u32 status = 0;
833 u8 PHYSR0;
834 u16 ANAR;
835 PHYSR0 = readb(&regs->PHYSR0);
1da177e4 836
2cf71d2e
DJ
837 /*
838 if (!(PHYSR0 & PHYSR0_LINKGD))
839 status|=VELOCITY_LINK_FAIL;
840 */
1da177e4 841
2cf71d2e
DJ
842 if (PHYSR0 & PHYSR0_FDPX)
843 status |= VELOCITY_DUPLEX_FULL;
1da177e4 844
2cf71d2e
DJ
845 if (PHYSR0 & PHYSR0_SPDG)
846 status |= VELOCITY_SPEED_1000;
847 else if (PHYSR0 & PHYSR0_SPD10)
848 status |= VELOCITY_SPEED_10;
849 else
850 status |= VELOCITY_SPEED_100;
1da177e4 851
3a7f8681
FR
852 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
853 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
854 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
855 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
856 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
2cf71d2e
DJ
857 status |= VELOCITY_AUTONEG_ENABLE;
858 }
1da177e4
LT
859 }
860
2cf71d2e
DJ
861 return status;
862}
1da177e4 863
2cf71d2e
DJ
864/**
865 * velocity_set_media_mode - set media mode
866 * @mii_status: old MII link state
867 *
868 * Check the media link state and configure the flow control
869 * PHY and also velocity hardware setup accordingly. In particular
870 * we need to set up CD polling and frame bursting.
871 */
872static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
873{
874 u32 curr_status;
875 struct mac_regs __iomem *regs = vptr->mac_regs;
1da177e4 876
2cf71d2e
DJ
877 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
878 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
07b5f6a6 879
2cf71d2e
DJ
880 /* Set mii link status */
881 set_mii_flow_control(vptr);
1da177e4 882
6aa20a22 883 /*
a34f0b31 884 Check if new status is consistent with current status
8e95a202
JP
885 if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
886 (mii_status==curr_status)) {
2cf71d2e
DJ
887 vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
888 vptr->mii_status=check_connection_type(vptr->mac_regs);
889 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
890 return 0;
891 }
1da177e4 892 */
6aa20a22 893
2cf71d2e 894 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3a7f8681 895 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
1da177e4
LT
896
897 /*
2cf71d2e 898 * If connection type is AUTO
1da177e4 899 */
2cf71d2e
DJ
900 if (mii_status & VELOCITY_AUTONEG_ENABLE) {
901 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
902 /* clear force MAC mode bit */
903 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
904 /* set duplex mode of MAC according to duplex mode of MII */
3a7f8681
FR
905 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
906 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
907 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
6aa20a22 908
2cf71d2e
DJ
909 /* enable AUTO-NEGO mode */
910 mii_set_auto_on(vptr);
911 } else {
15419227 912 u16 CTRL1000;
2cf71d2e
DJ
913 u16 ANAR;
914 u8 CHIPGCR;
1da177e4 915
2cf71d2e
DJ
916 /*
917 * 1. if it's 3119, disable frame bursting in halfduplex mode
918 * and enable it in fullduplex mode
919 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
920 * 3. only enable CD heart beat counter in 10HD mode
921 */
1da177e4 922
2cf71d2e
DJ
923 /* set force MAC mode bit */
924 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
1da177e4 925
2cf71d2e 926 CHIPGCR = readb(&regs->CHIPGCR);
15419227 927
928 if (mii_status & VELOCITY_SPEED_1000)
929 CHIPGCR |= CHIPGCR_FCGMII;
930 else
931 CHIPGCR &= ~CHIPGCR_FCGMII;
501e4d24 932
2cf71d2e
DJ
933 if (mii_status & VELOCITY_DUPLEX_FULL) {
934 CHIPGCR |= CHIPGCR_FCFDX;
935 writeb(CHIPGCR, &regs->CHIPGCR);
936 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
937 if (vptr->rev_id < REV_ID_VT3216_A0)
938 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
939 } else {
940 CHIPGCR &= ~CHIPGCR_FCFDX;
941 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
942 writeb(CHIPGCR, &regs->CHIPGCR);
943 if (vptr->rev_id < REV_ID_VT3216_A0)
944 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
945 }
1da177e4 946
15419227 947 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
948 CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
949 if ((mii_status & VELOCITY_SPEED_1000) &&
950 (mii_status & VELOCITY_DUPLEX_FULL)) {
951 CTRL1000 |= ADVERTISE_1000FULL;
952 }
953 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
1da177e4 954
2cf71d2e
DJ
955 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
956 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
957 else
958 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1da177e4 959
3a7f8681
FR
960 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
961 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
962 ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
2cf71d2e
DJ
963 if (mii_status & VELOCITY_SPEED_100) {
964 if (mii_status & VELOCITY_DUPLEX_FULL)
3a7f8681 965 ANAR |= ADVERTISE_100FULL;
2cf71d2e 966 else
3a7f8681 967 ANAR |= ADVERTISE_100HALF;
15419227 968 } else if (mii_status & VELOCITY_SPEED_10) {
2cf71d2e 969 if (mii_status & VELOCITY_DUPLEX_FULL)
3a7f8681 970 ANAR |= ADVERTISE_10FULL;
2cf71d2e 971 else
3a7f8681 972 ANAR |= ADVERTISE_10HALF;
2cf71d2e 973 }
3a7f8681 974 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
2cf71d2e
DJ
975 /* enable AUTO-NEGO mode */
976 mii_set_auto_on(vptr);
3a7f8681 977 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
d3b238a0 978 }
2cf71d2e
DJ
979 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
980 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
981 return VELOCITY_LINK_CHANGE;
982}
8a22dddb 983
2cf71d2e
DJ
984/**
985 * velocity_print_link_status - link status reporting
986 * @vptr: velocity to report on
987 *
988 * Turn the link status of the velocity card into a kernel log
989 * description of the new link state, detailing speed and duplex
990 * status
991 */
992static void velocity_print_link_status(struct velocity_info *vptr)
993{
6aa20a22 994
2cf71d2e
DJ
995 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
996 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
997 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
998 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
6aa20a22 999
2cf71d2e
DJ
1000 if (vptr->mii_status & VELOCITY_SPEED_1000)
1001 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1002 else if (vptr->mii_status & VELOCITY_SPEED_100)
1003 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1004 else
1005 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1da177e4 1006
2cf71d2e
DJ
1007 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1008 VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1009 else
1010 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1011 } else {
1012 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1013 switch (vptr->options.spd_dpx) {
15419227 1014 case SPD_DPX_1000_FULL:
1015 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
1016 break;
2cf71d2e
DJ
1017 case SPD_DPX_100_HALF:
1018 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1019 break;
1020 case SPD_DPX_100_FULL:
1021 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1022 break;
1023 case SPD_DPX_10_HALF:
1024 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1025 break;
1026 case SPD_DPX_10_FULL:
1027 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1028 break;
1029 default:
1030 break;
1031 }
1da177e4 1032 }
1da177e4
LT
1033}
1034
1035/**
2cf71d2e
DJ
1036 * enable_flow_control_ability - flow control
 1037 * @vptr: velocity to configure
1da177e4 1038 *
2cf71d2e
DJ
1039 * Set up flow control according to the flow control options
1040 * determined by the eeprom/configuration.
1da177e4 1041 */
2cf71d2e 1042static void enable_flow_control_ability(struct velocity_info *vptr)
1da177e4 1043{
1da177e4 1044
2cf71d2e 1045 struct mac_regs __iomem *regs = vptr->mac_regs;
1da177e4 1046
2cf71d2e 1047 switch (vptr->options.flow_cntl) {
1da177e4 1048
2cf71d2e
DJ
1049 case FLOW_CNTL_DEFAULT:
1050 if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1051 writel(CR0_FDXRFCEN, &regs->CR0Set);
1052 else
1053 writel(CR0_FDXRFCEN, &regs->CR0Clr);
1054
1055 if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1056 writel(CR0_FDXTFCEN, &regs->CR0Set);
1057 else
1058 writel(CR0_FDXTFCEN, &regs->CR0Clr);
1059 break;
1060
1061 case FLOW_CNTL_TX:
1062 writel(CR0_FDXTFCEN, &regs->CR0Set);
1063 writel(CR0_FDXRFCEN, &regs->CR0Clr);
1064 break;
1065
1066 case FLOW_CNTL_RX:
1067 writel(CR0_FDXRFCEN, &regs->CR0Set);
1068 writel(CR0_FDXTFCEN, &regs->CR0Clr);
1069 break;
1070
1071 case FLOW_CNTL_TX_RX:
1072 writel(CR0_FDXTFCEN, &regs->CR0Set);
1073 writel(CR0_FDXRFCEN, &regs->CR0Set);
1074 break;
1075
1076 case FLOW_CNTL_DISABLE:
1077 writel(CR0_FDXRFCEN, &regs->CR0Clr);
1078 writel(CR0_FDXTFCEN, &regs->CR0Clr);
1079 break;
1080
1081 default:
1082 break;
1083 }
1da177e4 1084
1da177e4
LT
1085}
1086
1087/**
2cf71d2e
DJ
1088 * velocity_soft_reset - soft reset
1089 * @vptr: velocity to reset
1da177e4 1090 *
2cf71d2e
DJ
1091 * Kick off a soft reset of the velocity adapter and then poll
1092 * until the reset sequence has completed before returning.
1da177e4 1093 */
2cf71d2e 1094static int velocity_soft_reset(struct velocity_info *vptr)
1da177e4 1095{
2cf71d2e
DJ
1096 struct mac_regs __iomem *regs = vptr->mac_regs;
1097 int i = 0;
6aa20a22 1098
2cf71d2e 1099 writel(CR0_SFRST, &regs->CR0Set);
1da177e4 1100
2cf71d2e
DJ
1101 for (i = 0; i < W_MAX_TIMEOUT; i++) {
1102 udelay(5);
1103 if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1104 break;
1da177e4
LT
1105 }
1106
2cf71d2e
DJ
1107 if (i == W_MAX_TIMEOUT) {
1108 writel(CR0_FORSRST, &regs->CR0Set);
1109 /* FIXME: PCI POSTING */
1110 /* delay 2ms */
1111 mdelay(2);
1da177e4 1112 }
1da177e4
LT
1113 return 0;
1114}
1115
1116/**
2cf71d2e
DJ
1117 * velocity_set_multi - filter list change callback
1118 * @dev: network device
1da177e4 1119 *
2cf71d2e
DJ
1120 * Called by the network layer when the filter lists need to change
1121 * for a velocity adapter. Reload the CAMs with the new address
1122 * filter ruleset.
1da177e4 1123 */
2cf71d2e 1124static void velocity_set_multi(struct net_device *dev)
1da177e4 1125{
2cf71d2e 1126 struct velocity_info *vptr = netdev_priv(dev);
1da177e4 1127 struct mac_regs __iomem *regs = vptr->mac_regs;
2cf71d2e
DJ
1128 u8 rx_mode;
1129 int i;
22bedad3 1130 struct netdev_hw_addr *ha;
1da177e4 1131
2cf71d2e
DJ
1132 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1133 writel(0xffffffff, &regs->MARCAM[0]);
1134 writel(0xffffffff, &regs->MARCAM[4]);
1135 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
4cd24eaf 1136 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
8e95a202 1137 (dev->flags & IFF_ALLMULTI)) {
2cf71d2e
DJ
1138 writel(0xffffffff, &regs->MARCAM[0]);
1139 writel(0xffffffff, &regs->MARCAM[4]);
1140 rx_mode = (RCR_AM | RCR_AB);
1141 } else {
1142 int offset = MCAM_SIZE - vptr->multicast_limit;
1143 mac_get_cam_mask(regs, vptr->mCAMmask);
1da177e4 1144
567ec874 1145 i = 0;
22bedad3
JP
1146 netdev_for_each_mc_addr(ha, dev) {
1147 mac_set_cam(regs, i + offset, ha->addr);
2cf71d2e 1148 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
567ec874 1149 i++;
1da177e4 1150 }
1da177e4 1151
2cf71d2e
DJ
1152 mac_set_cam_mask(regs, vptr->mCAMmask);
1153 rx_mode = RCR_AM | RCR_AB | RCR_AP;
1da177e4 1154 }
2cf71d2e
DJ
1155 if (dev->mtu > 1500)
1156 rx_mode |= RCR_AL;
1da177e4 1157
2cf71d2e 1158 BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1da177e4 1159
9088d9a4
FR
1160}
1161
2cf71d2e
DJ
1162/*
1163 * MII access , media link mode setting functions
1da177e4
LT
1164 */
1165
1da177e4 1166/**
2cf71d2e
DJ
1167 * mii_init - set up MII
1168 * @vptr: velocity adapter
 1169 * @mii_status: link status
1da177e4 1170 *
2cf71d2e 1171 * Set up the PHY for the current link state.
1da177e4 1172 */
2cf71d2e 1173static void mii_init(struct velocity_info *vptr, u32 mii_status)
1da177e4 1174{
2cf71d2e 1175 u16 BMCR;
1da177e4 1176
2cf71d2e
DJ
1177 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1178 case PHYID_CICADA_CS8201:
1179 /*
1180 * Reset to hardware default
1181 */
3a7f8681 1182 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
2cf71d2e
DJ
1183 /*
1184 * Turn on ECHODIS bit in NWay-forced full mode and turn it
 1185 * off in NWay-forced half mode for the NWay-forced vs.
1186 * legacy-forced issue.
1187 */
1188 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
3a7f8681 1189 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
2cf71d2e 1190 else
3a7f8681 1191 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
2cf71d2e
DJ
1192 /*
1193 * Turn on Link/Activity LED enable bit for CIS8201
1194 */
3a7f8681 1195 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
2cf71d2e
DJ
1196 break;
1197 case PHYID_VT3216_32BIT:
1198 case PHYID_VT3216_64BIT:
1199 /*
1200 * Reset to hardware default
1201 */
3a7f8681 1202 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
2cf71d2e
DJ
1203 /*
1204 * Turn on ECHODIS bit in NWay-forced full mode and turn it
 1205 * off in NWay-forced half mode for the NWay-forced vs.
1206 * legacy-forced issue
1207 */
1208 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
3a7f8681 1209 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
2cf71d2e 1210 else
3a7f8681 1211 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
2cf71d2e 1212 break;
1da177e4 1213
2cf71d2e
DJ
1214 case PHYID_MARVELL_1000:
1215 case PHYID_MARVELL_1000S:
1216 /*
1217 * Assert CRS on Transmit
1218 */
1219 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1220 /*
1221 * Reset to hardware default
1222 */
3a7f8681 1223 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
2cf71d2e
DJ
1224 break;
1225 default:
1226 ;
1227 }
3a7f8681
FR
1228 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1229 if (BMCR & BMCR_ISOLATE) {
1230 BMCR &= ~BMCR_ISOLATE;
1231 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1da177e4 1232 }
1da177e4
LT
1233}
1234
6dfc4b95
SK
1235/**
1236 * setup_queue_timers - Setup interrupt timers
1237 *
 1238 * Set up the interrupt frequency during suppression (timeout if the frame
1239 * count isn't filled).
1240 */
1241static void setup_queue_timers(struct velocity_info *vptr)
1242{
1243 /* Only for newer revisions */
1244 if (vptr->rev_id >= REV_ID_VT3216_A0) {
1245 u8 txqueue_timer = 0;
1246 u8 rxqueue_timer = 0;
1247
1248 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1249 VELOCITY_SPEED_100)) {
1250 txqueue_timer = vptr->options.txqueue_timer;
1251 rxqueue_timer = vptr->options.rxqueue_timer;
1252 }
1253
1254 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1255 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1256 }
1257}
5ae297b0 1258
6dfc4b95
SK
1259/**
1260 * setup_adaptive_interrupts - Setup interrupt suppression
1261 *
 1262 * @vptr: velocity adapter
 1263 *
 1264 * The velocity is able to suppress interrupts during high interrupt load.
1265 * This function turns on that feature.
1266 */
1267static void setup_adaptive_interrupts(struct velocity_info *vptr)
1268{
1269 struct mac_regs __iomem *regs = vptr->mac_regs;
1270 u16 tx_intsup = vptr->options.tx_intsup;
1271 u16 rx_intsup = vptr->options.rx_intsup;
1272
1273 /* Setup default interrupt mask (will be changed below) */
1274 vptr->int_mask = INT_MASK_DEF;
1275
1276 /* Set Tx Interrupt Suppression Threshold */
1277 writeb(CAMCR_PS0, &regs->CAMCR);
1278 if (tx_intsup != 0) {
1279 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1280 ISR_PTX2I | ISR_PTX3I);
1281 writew(tx_intsup, &regs->ISRCTL);
1282 } else
1283 writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1284
1285 /* Set Rx Interrupt Suppression Threshold */
1286 writeb(CAMCR_PS1, &regs->CAMCR);
1287 if (rx_intsup != 0) {
1288 vptr->int_mask &= ~ISR_PRXI;
1289 writew(rx_intsup, &regs->ISRCTL);
1290 } else
1291 writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1292
1293 /* Select page to interrupt hold timer */
1294 writeb(0, &regs->CAMCR);
1295}
2cf71d2e 1296
1da177e4 1297/**
2cf71d2e
DJ
1298 * velocity_init_registers - initialise MAC registers
1299 * @vptr: velocity to init
1300 * @type: type of initialisation (hot or cold)
1da177e4 1301 *
2cf71d2e
DJ
1302 * Initialise the MAC on a reset or on first set up on the
1303 * hardware.
1da177e4 1304 */
2cf71d2e
DJ
1305static void velocity_init_registers(struct velocity_info *vptr,
1306 enum velocity_init_type type)
1da177e4 1307{
2cf71d2e
DJ
1308 struct mac_regs __iomem *regs = vptr->mac_regs;
1309 int i, mii_status;
1da177e4 1310
2cf71d2e 1311 mac_wol_reset(regs);
1da177e4 1312
2cf71d2e
DJ
1313 switch (type) {
1314 case VELOCITY_INIT_RESET:
1315 case VELOCITY_INIT_WOL:
1da177e4 1316
2cf71d2e 1317 netif_stop_queue(vptr->dev);
6aa20a22 1318
2cf71d2e
DJ
1319 /*
 1320 * Reset RX so that the RX pointer stays on a 4X location
1321 */
1322 velocity_rx_reset(vptr);
1323 mac_rx_queue_run(regs);
1324 mac_rx_queue_wake(regs);
6aa20a22 1325
2cf71d2e
DJ
1326 mii_status = velocity_get_opt_media_mode(vptr);
1327 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1328 velocity_print_link_status(vptr);
1329 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1330 netif_wake_queue(vptr->dev);
1da177e4 1331 }
1da177e4 1332
2cf71d2e 1333 enable_flow_control_ability(vptr);
1da177e4 1334
2cf71d2e
DJ
1335 mac_clear_isr(regs);
1336 writel(CR0_STOP, &regs->CR0Clr);
1337 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1338 &regs->CR0Set);
6aa20a22 1339
2cf71d2e 1340 break;
1da177e4 1341
2cf71d2e
DJ
1342 case VELOCITY_INIT_COLD:
1343 default:
1344 /*
1345 * Do reset
1346 */
1347 velocity_soft_reset(vptr);
1348 mdelay(5);
1da177e4 1349
2cf71d2e
DJ
1350 mac_eeprom_reload(regs);
1351 for (i = 0; i < 6; i++)
1352 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
1da177e4 1353
2cf71d2e
DJ
1354 /*
1355 * clear Pre_ACPI bit.
1356 */
1357 BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1358 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1359 mac_set_dma_length(regs, vptr->options.DMA_length);
1da177e4 1360
2cf71d2e
DJ
1361 writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1362 /*
1363 * Back off algorithm use original IEEE standard
1364 */
1365 BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1da177e4
LT
1366
1367 /*
2cf71d2e 1368 * Init CAM filter
1da177e4 1369 */
2cf71d2e 1370 velocity_init_cam_filter(vptr);
1da177e4 1371
2cf71d2e
DJ
1372 /*
1373 * Set packet filter: Receive directed and broadcast address
1374 */
1375 velocity_set_multi(vptr->dev);
1da177e4 1376
2cf71d2e
DJ
1377 /*
1378 * Enable MII auto-polling
1379 */
1380 enable_mii_autopoll(regs);
1da177e4 1381
6dfc4b95 1382 setup_adaptive_interrupts(vptr);
1da177e4 1383
2cf71d2e
DJ
1384 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1385 writew(vptr->options.numrx - 1, &regs->RDCSize);
1386 mac_rx_queue_run(regs);
1387 mac_rx_queue_wake(regs);
1da177e4 1388
2cf71d2e 1389 writew(vptr->options.numtx - 1, &regs->TDCSize);
1da177e4 1390
2cf71d2e
DJ
1391 for (i = 0; i < vptr->tx.numq; i++) {
1392 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1393 mac_tx_queue_run(regs, i);
1394 }
1da177e4 1395
2cf71d2e 1396 init_flow_control_register(vptr);
6aa20a22 1397
2cf71d2e
DJ
1398 writel(CR0_STOP, &regs->CR0Clr);
1399 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1da177e4 1400
2cf71d2e
DJ
1401 mii_status = velocity_get_opt_media_mode(vptr);
1402 netif_stop_queue(vptr->dev);
1da177e4 1403
2cf71d2e 1404 mii_init(vptr, mii_status);
1da177e4 1405
2cf71d2e
DJ
1406 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1407 velocity_print_link_status(vptr);
1408 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1409 netif_wake_queue(vptr->dev);
1da177e4 1410 }
6aa20a22 1411
2cf71d2e
DJ
1412 enable_flow_control_ability(vptr);
1413 mac_hw_mibs_init(regs);
1414 mac_write_int_mask(vptr->int_mask, regs);
1415 mac_clear_isr(regs);
1416
1da177e4 1417 }
1da177e4
LT
1418}
1419
2cf71d2e 1420static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1da177e4 1421{
2cf71d2e
DJ
1422 struct mac_regs __iomem *regs = vptr->mac_regs;
1423 int avail, dirty, unusable;
1424
1425 /*
1426 * RD number must be equal to 4X per hardware spec
1427 * (programming guide rev 1.20, p.13)
1428 */
1429 if (vptr->rx.filled < 4)
1430 return;
1431
1432 wmb();
1433
1434 unusable = vptr->rx.filled & 0x0003;
1435 dirty = vptr->rx.dirty - unusable;
1436 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1437 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1438 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1da177e4 1439 }
2cf71d2e
DJ
1440
1441 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1442 vptr->rx.filled = unusable;
1da177e4
LT
1443}
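/*
 * Worked example (illustrative): with rx.filled == 7, "filled & 0xfffc"
 * returns 4 descriptors to the NIC and the remaining 3 ("unusable")
 * stay counted in rx.filled until a later refill completes the next
 * group of four.
 */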
1444
1445/**
2cf71d2e
DJ
1446 * velocity_init_dma_rings - set up DMA rings
1447 * @vptr: Velocity to set up
6aa20a22 1448 *
2cf71d2e
DJ
1449 * Allocate PCI mapped DMA rings for the receive and transmit layer
1450 * to use.
1da177e4 1451 */
2cf71d2e 1452static int velocity_init_dma_rings(struct velocity_info *vptr)
1da177e4 1453{
2cf71d2e
DJ
1454 struct velocity_opt *opt = &vptr->options;
1455 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1456 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1457 struct pci_dev *pdev = vptr->pdev;
1458 dma_addr_t pool_dma;
1459 void *pool;
1460 unsigned int i;
1da177e4
LT
1461
1462 /*
2cf71d2e
DJ
 1463 * Allocate all RD/TD rings in a single pool.
 1464 *
 1465 * pci_alloc_consistent() fulfills the requirement for 64 byte
 1466 * alignment
1da177e4 1467 */
2cf71d2e
DJ
1468 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1469 rx_ring_size, &pool_dma);
1470 if (!pool) {
1471 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1472 vptr->dev->name);
1473 return -ENOMEM;
1da177e4
LT
1474 }
1475
2cf71d2e
DJ
1476 vptr->rx.ring = pool;
1477 vptr->rx.pool_dma = pool_dma;
1da177e4 1478
2cf71d2e
DJ
1479 pool += rx_ring_size;
1480 pool_dma += rx_ring_size;
d4f73c8e 1481
2cf71d2e
DJ
1482 for (i = 0; i < vptr->tx.numq; i++) {
1483 vptr->tx.rings[i] = pool;
1484 vptr->tx.pool_dma[i] = pool_dma;
1485 pool += tx_ring_size;
1486 pool_dma += tx_ring_size;
1487 }
1da177e4
LT
1488
1489 return 0;
1490}
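/*
 * Resulting pool layout (for reference):
 *
 *	pool_dma -> [ RX ring:   numrx * sizeof(struct rx_desc) ]
 *	            [ TX ring 0: numtx * sizeof(struct tx_desc) ]
 *	            [ ... one TX ring per queue, tx.numq in all  ]
 */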
1491
2cf71d2e
DJ
1492static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1493{
1494 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1495}
1496
1da177e4
LT
1497/**
1498 * velocity_alloc_rx_buf - allocate aligned receive buffer
1499 * @vptr: velocity
1500 * @idx: ring index
1501 *
1502 * Allocate a new full sized buffer for the reception of a frame and
1503 * map it into PCI space for the hardware to use. The hardware
1504 * requires *64* byte alignment of the buffer which makes life
1505 * less fun than would be ideal.
1506 */
1da177e4
LT
1507static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1508{
0fe9f15e
FR
1509 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1510 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1da177e4 1511
0fe9f15e 1512 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1da177e4
LT
1513 if (rd_info->skb == NULL)
1514 return -ENOMEM;
1515
1516 /*
1517 * Do the gymnastics to get the buffer head for data at
1518 * 64byte alignment.
1519 */
da95b2d4
SK
1520 skb_reserve(rd_info->skb,
1521 64 - ((unsigned long) rd_info->skb->data & 63));
0fe9f15e
FR
1522 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1523 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
6aa20a22 1524
1da177e4
LT
1525 /*
1526 * Fill in the descriptor to match
0fe9f15e 1527 */
6aa20a22 1528
1da177e4 1529 *((u32 *) & (rd->rdesc0)) = 0;
0fe9f15e 1530 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1da177e4
LT
1531 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1532 rd->pa_high = 0;
1533 return 0;
1534}
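/*
 * Alignment example (illustrative): if skb->data sits at an address
 * with (addr & 63) == 40, the skb_reserve() above skips 64 - 40 = 24
 * bytes so the receive buffer starts on the next 64 byte boundary.
 */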
1535
6aa20a22 1536
2cf71d2e 1537static int velocity_rx_refill(struct velocity_info *vptr)
1da177e4 1538{
2cf71d2e 1539 int dirty = vptr->rx.dirty, done = 0;
1da177e4 1540
2cf71d2e
DJ
1541 do {
1542 struct rx_desc *rd = vptr->rx.ring + dirty;
1da177e4 1543
2cf71d2e
DJ
1544 /* Fine for an all zero Rx desc at init time as well */
1545 if (rd->rdesc0.len & OWNED_BY_NIC)
1546 break;
1da177e4 1547
2cf71d2e
DJ
1548 if (!vptr->rx.info[dirty].skb) {
1549 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1da177e4 1550 break;
1da177e4 1551 }
2cf71d2e
DJ
1552 done++;
1553 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1554 } while (dirty != vptr->rx.curr);
1da177e4 1555
2cf71d2e
DJ
1556 if (done) {
1557 vptr->rx.dirty = dirty;
1558 vptr->rx.filled += done;
1da177e4 1559 }
2cf71d2e
DJ
1560
1561 return done;
1da177e4
LT
1562}
1563
1564/**
2cf71d2e
DJ
1565 * velocity_free_rd_ring - free receive ring
1566 * @vptr: velocity to clean up
1da177e4 1567 *
2cf71d2e
DJ
1568 * Free the receive buffers for each ring slot and any
1569 * attached socket buffers that need to go away.
1da177e4 1570 */
2cf71d2e 1571static void velocity_free_rd_ring(struct velocity_info *vptr)
1da177e4 1572{
2cf71d2e 1573 int i;
1da177e4 1574
2cf71d2e
DJ
1575 if (vptr->rx.info == NULL)
1576 return;
6aa20a22 1577
2cf71d2e
DJ
1578 for (i = 0; i < vptr->options.numrx; i++) {
1579 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1580 struct rx_desc *rd = vptr->rx.ring + i;
1da177e4 1581
2cf71d2e 1582 memset(rd, 0, sizeof(*rd));
1da177e4 1583
2cf71d2e
DJ
1584 if (!rd_info->skb)
1585 continue;
1586 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1587 PCI_DMA_FROMDEVICE);
1588 rd_info->skb_dma = 0;
6aa20a22 1589
2cf71d2e
DJ
1590 dev_kfree_skb(rd_info->skb);
1591 rd_info->skb = NULL;
1da177e4
LT
1592 }
1593
2cf71d2e
DJ
1594 kfree(vptr->rx.info);
1595 vptr->rx.info = NULL;
1596}
1da177e4 1597
2cf71d2e
DJ
1598/**
1599 * velocity_init_rd_ring - set up receive ring
1600 * @vptr: velocity to configure
1601 *
1602 * Allocate and set up the receive buffers for each ring slot and
1603 * assign them to the network adapter.
1604 */
1605static int velocity_init_rd_ring(struct velocity_info *vptr)
1606{
1607 int ret = -ENOMEM;
1da177e4 1608
2cf71d2e
DJ
1609 vptr->rx.info = kcalloc(vptr->options.numrx,
1610 sizeof(struct velocity_rd_info), GFP_KERNEL);
1611 if (!vptr->rx.info)
1612 goto out;
6aa20a22 1613
2cf71d2e 1614 velocity_init_rx_ring_indexes(vptr);
1da177e4 1615
2cf71d2e
DJ
1616 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1617 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1618 "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1619 velocity_free_rd_ring(vptr);
1620 goto out;
1621 }
1da177e4 1622
2cf71d2e
DJ
1623 ret = 0;
1624out:
1625 return ret;
1da177e4
LT
1626}
1627
1628/**
2cf71d2e
DJ
1629 * velocity_init_td_ring - set up transmit ring
1630 * @vptr: velocity
1da177e4 1631 *
2cf71d2e
DJ
1632 * Set up the transmit ring and chain the ring pointers together.
1633 * Returns zero on success or a negative posix errno code for
1634 * failure.
1da177e4 1635 */
2cf71d2e 1636static int velocity_init_td_ring(struct velocity_info *vptr)
1da177e4 1637{
2cf71d2e 1638 int j;
1da177e4 1639
2cf71d2e
DJ
1640 /* Init the TD ring entries */
1641 for (j = 0; j < vptr->tx.numq; j++) {
1da177e4 1642
2cf71d2e
DJ
1643 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1644 sizeof(struct velocity_td_info),
1645 GFP_KERNEL);
1646 if (!vptr->tx.infos[j]) {
1647 while (--j >= 0)
1648 kfree(vptr->tx.infos[j]);
1649 return -ENOMEM;
1da177e4 1650 }
2cf71d2e
DJ
1651
1652 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1da177e4 1653 }
2cf71d2e
DJ
1654 return 0;
1655}

/**
 * velocity_free_dma_rings - free PCI ring pointers
 * @vptr: Velocity to free from
 *
 * Clean up the PCI ring buffers allocated to this velocity.
 */
static void velocity_free_dma_rings(struct velocity_info *vptr)
{
	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;

	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
}

static int velocity_init_rings(struct velocity_info *vptr, int mtu)
{
	int ret;

	velocity_set_rxbufsize(vptr, mtu);

	ret = velocity_init_dma_rings(vptr);
	if (ret < 0)
		goto out;

	ret = velocity_init_rd_ring(vptr);
	if (ret < 0)
		goto err_free_dma_rings_0;

	ret = velocity_init_td_ring(vptr);
	if (ret < 0)
		goto err_free_rd_ring_1;
out:
	return ret;

err_free_rd_ring_1:
	velocity_free_rd_ring(vptr);
err_free_dma_rings_0:
	velocity_free_dma_rings(vptr);
	goto out;
}

/**
 * velocity_free_tx_buf - free transmit buffer
 * @vptr: velocity
 * @tdinfo: buffer
 *
 * Release a transmit buffer. If the buffer was preallocated then
 * recycle it, if not then unmap the buffer.
 */
static void velocity_free_tx_buf(struct velocity_info *vptr,
		struct velocity_td_info *tdinfo, struct tx_desc *td)
{
	struct sk_buff *skb = tdinfo->skb;

	/*
	 * Don't unmap the pre-allocated tx_bufs
	 */
	if (tdinfo->skb_dma) {
		int i;

		for (i = 0; i < tdinfo->nskb_dma; i++) {
			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);

			/* For scatter-gather */
			if (skb_shinfo(skb)->nr_frags > 0)
				pktlen = max_t(size_t, pktlen,
					       td->td_buf[i].size & ~TD_QUEUE);

			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
					 le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
		}
	}
	dev_kfree_skb_irq(skb);
	tdinfo->skb = NULL;
}

/*
 * FIXME: could we merge this with velocity_free_tx_buf ?
 */
static void velocity_free_td_ring_entry(struct velocity_info *vptr,
					int q, int n)
{
	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
	int i;

	if (td_info == NULL)
		return;

	if (td_info->skb) {
		for (i = 0; i < td_info->nskb_dma; i++) {
			if (td_info->skb_dma[i]) {
				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
					td_info->skb->len, PCI_DMA_TODEVICE);
				td_info->skb_dma[i] = 0;
			}
		}
		dev_kfree_skb(td_info->skb);
		td_info->skb = NULL;
	}
}

/**
 * velocity_free_td_ring - free td ring
 * @vptr: velocity
 *
 * Free up the transmit ring for this particular velocity adapter.
 * We free the ring contents but not the ring itself.
 */
static void velocity_free_td_ring(struct velocity_info *vptr)
{
	int i, j;

	for (j = 0; j < vptr->tx.numq; j++) {
		if (vptr->tx.infos[j] == NULL)
			continue;
		for (i = 0; i < vptr->options.numtx; i++)
			velocity_free_td_ring_entry(vptr, j, i);

		kfree(vptr->tx.infos[j]);
		vptr->tx.infos[j] = NULL;
	}
}

static void velocity_free_rings(struct velocity_info *vptr)
{
	velocity_free_td_ring(vptr);
	velocity_free_rd_ring(vptr);
	velocity_free_dma_rings(vptr);
}

/**
 * velocity_error - handle error from controller
 * @vptr: velocity
 * @status: card status
 *
 * Process an error report from the hardware and attempt to recover
 * the card itself. At the moment we cannot recover from some
 * theoretically impossible errors but this could be fixed using
 * the pci_device_failed logic to bounce the hardware
 *
 */
static void velocity_error(struct velocity_info *vptr, int status)
{

	if (status & ISR_TXSTLI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;

		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
		writew(TRDCSR_RUN, &regs->TDCSRClr);
		netif_stop_queue(vptr->dev);

		/* FIXME: port over the pci_device_failed code and use it
		   here */
	}

	if (status & ISR_SRCI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;
		int linked;

		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
			vptr->mii_status = check_connection_type(regs);

			/*
			 * If it is a 3119, disable frame bursting in
			 * halfduplex mode and enable it in fullduplex
			 * mode
			 */
			if (vptr->rev_id < REV_ID_VT3216_A0) {
				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
				else
					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
			}
			/*
			 * Only enable CD heart beat counter in 10HD mode
			 */
			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
			else
				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

			setup_queue_timers(vptr);
		}
		/*
		 * Get link status from PHYSR0
		 */
		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

		if (linked) {
			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
			netif_carrier_on(vptr->dev);
		} else {
			vptr->mii_status |= VELOCITY_LINK_FAIL;
			netif_carrier_off(vptr->dev);
		}

		velocity_print_link_status(vptr);
		enable_flow_control_ability(vptr);

		/*
		 * Re-enable auto-polling because SRCI will disable
		 * auto-polling
		 */

		enable_mii_autopoll(regs);

		if (vptr->mii_status & VELOCITY_LINK_FAIL)
			netif_stop_queue(vptr->dev);
		else
			netif_wake_queue(vptr->dev);

	}
	if (status & ISR_MIBFI)
		velocity_update_hw_mibs(vptr);
	if (status & ISR_LSTEI)
		mac_rx_queue_wake(vptr->mac_regs);
}

/**
 * velocity_tx_srv - transmit interrupt service
 * @vptr: velocity
 *
 * Scan the queues looking for transmitted packets that
 * we can complete and clean up. Update any statistics as
 * necessary.
 */
static int velocity_tx_srv(struct velocity_info *vptr)
{
	struct tx_desc *td;
	int qnum;
	int full = 0;
	int idx;
	int works = 0;
	struct velocity_td_info *tdinfo;
	struct net_device_stats *stats = &vptr->dev->stats;

	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
			idx = (idx + 1) % vptr->options.numtx) {

			/*
			 * Get Tx Descriptor
			 */
			td = &(vptr->tx.rings[qnum][idx]);
			tdinfo = &(vptr->tx.infos[qnum][idx]);

			if (td->tdesc0.len & OWNED_BY_NIC)
				break;

			if ((works++ > 15))
				break;

			if (td->tdesc0.TSR & TSR0_TERR) {
				stats->tx_errors++;
				stats->tx_dropped++;
				if (td->tdesc0.TSR & TSR0_CDH)
					stats->tx_heartbeat_errors++;
				if (td->tdesc0.TSR & TSR0_CRS)
					stats->tx_carrier_errors++;
				if (td->tdesc0.TSR & TSR0_ABT)
					stats->tx_aborted_errors++;
				if (td->tdesc0.TSR & TSR0_OWC)
					stats->tx_window_errors++;
			} else {
				stats->tx_packets++;
				stats->tx_bytes += tdinfo->skb->len;
			}
			velocity_free_tx_buf(vptr, tdinfo, td);
			vptr->tx.used[qnum]--;
		}
		vptr->tx.tail[qnum] = idx;

		if (AVAIL_TD(vptr, qnum) < 1)
			full = 1;
	}
	/*
	 * Look to see if we should kick the transmit network
	 * layer for more work.
	 */
	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
		netif_wake_queue(vptr->dev);
	}
	return works;
}

/**
 * velocity_rx_csum - checksum process
 * @rd: receive packet descriptor
 * @skb: network layer packet buffer
 *
 * Process the status bits for the received packet and determine
 * if the checksum was computed and verified by the hardware
 */
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (rd->rdesc1.CSM & CSM_IPKT) {
		if (rd->rdesc1.CSM & CSM_IPOK) {
			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
			    (rd->rdesc1.CSM & CSM_UDPKT)) {
				if (!(rd->rdesc1.CSM & CSM_TUPOK))
					return;
			}
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}
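
/*
 * Worked example for velocity_rx_csum() (illustrative, values assumed):
 * an IPv4/TCP frame that passed both hardware checks arrives with
 * rd->rdesc1.CSM == (CSM_IPKT | CSM_IPOK | CSM_TCPKT | CSM_TUPOK), so
 * the skb is marked CHECKSUM_UNNECESSARY and the stack skips software
 * verification.  The same frame with a corrupt TCP checksum lacks
 * CSM_TUPOK, the function returns early, and the skb keeps the
 * CHECKSUM_NONE state set by skb_checksum_none_assert().
 */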

/**
 * velocity_rx_copy - in place Rx copy for small packets
 * @rx_skb: network layer packet buffer candidate
 * @pkt_size: received data size
 * @rd: receive packet descriptor
 * @dev: network device
 *
 * Replace the current skb that is scheduled for Rx processing by a
 * shorter, immediately allocated skb, if the received packet is small
 * enough. This function returns a negative value if the received
 * packet is too big or if memory is exhausted.
 */
static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
			    struct velocity_info *vptr)
{
	int ret = -1;
	if (pkt_size < rx_copybreak) {
		struct sk_buff *new_skb;

		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
		if (new_skb) {
			new_skb->ip_summed = rx_skb[0]->ip_summed;
			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
			*rx_skb = new_skb;
			ret = 0;
		}

	}
	return ret;
}
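
/*
 * Sketch of the copybreak trade-off (illustrative; the 256 byte
 * threshold below is an assumed setting of the rx_copybreak module
 * parameter, not a driver default): a 60 byte ARP reply is copied
 * into a fresh small skb and the original DMA buffer stays on the
 * ring, while a 1500 byte frame fails the pkt_size < rx_copybreak
 * test, is handed up as-is, and its ring slot must be refilled.
 */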

/**
 * velocity_iph_realign - IP header alignment
 * @vptr: velocity we are handling
 * @skb: network layer packet buffer
 * @pkt_size: received data size
 *
 * Align the IP header on a 2-byte boundary. This behavior can be
 * configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
					struct sk_buff *skb, int pkt_size)
{
	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
		memmove(skb->data + 2, skb->data, pkt_size);
		skb_reserve(skb, 2);
	}
}

/**
 * velocity_receive_frame - received packet processor
 * @vptr: velocity we are handling
 * @idx: ring index
 *
 * A packet has arrived. We process the packet and if appropriate
 * pass the frame up the network stack
 */
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
	struct net_device_stats *stats = &vptr->dev->stats;
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
	struct sk_buff *skb;

	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
		stats->rx_length_errors++;
		return -EINVAL;
	}

	if (rd->rdesc0.RSR & RSR_MAR)
		stats->multicast++;

	skb = rd_info->skb;

	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

	/*
	 * Drop frame not meeting IEEE 802.3
	 */

	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
		if (rd->rdesc0.RSR & RSR_RL) {
			stats->rx_length_errors++;
			return -EINVAL;
		}
	}

	pci_action = pci_dma_sync_single_for_device;

	velocity_rx_csum(rd, skb);

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		velocity_iph_realign(vptr, skb, pkt_len);
		pci_action = pci_unmap_single;
		rd_info->skb = NULL;
	}

	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
		   PCI_DMA_FROMDEVICE);

	skb_put(skb, pkt_len - 4);
	skb->protocol = eth_type_trans(skb, vptr->dev);

	if (rd->rdesc0.RSR & RSR_DETAG) {
		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));

		__vlan_hwaccel_put_tag(skb, vid);
	}
	netif_rx(skb);

	stats->rx_bytes += pkt_len;
	stats->rx_packets++;

	return 0;
}

/**
 * velocity_rx_srv - service RX interrupt
 * @vptr: velocity
 *
 * Walk the receive ring of the velocity adapter and remove
 * any received packets from the receive queue. Hand the ring
 * slots back to the adapter for reuse.
 */
static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
	struct net_device_stats *stats = &vptr->dev->stats;
	int rd_curr = vptr->rx.curr;
	int works = 0;

	while (works < budget_left) {
		struct rx_desc *rd = vptr->rx.ring + rd_curr;

		if (!vptr->rx.info[rd_curr].skb)
			break;

		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		rmb();

		/*
		 * Don't drop CE or RL error frame although RXOK is off
		 */
		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
			if (velocity_receive_frame(vptr, rd_curr) < 0)
				stats->rx_dropped++;
		} else {
			if (rd->rdesc0.RSR & RSR_CRC)
				stats->rx_crc_errors++;
			if (rd->rdesc0.RSR & RSR_FAE)
				stats->rx_frame_errors++;

			stats->rx_dropped++;
		}

		rd->size |= RX_INTEN;

		rd_curr++;
		if (rd_curr >= vptr->options.numrx)
			rd_curr = 0;
		works++;
	}

	vptr->rx.curr = rd_curr;

	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
		velocity_give_many_rx_descs(vptr);

	VAR_USED(stats);
	return works;
}
6aa20a22 2144
dfff7144
SK
2145static int velocity_poll(struct napi_struct *napi, int budget)
2146{
2147 struct velocity_info *vptr = container_of(napi,
2148 struct velocity_info, napi);
2149 unsigned int rx_done;
3f2e8d9f 2150 unsigned long flags;
dfff7144 2151
3f2e8d9f 2152 spin_lock_irqsave(&vptr->lock, flags);
dfff7144
SK
2153 /*
2154 * Do rx and tx twice for performance (taken from the VIA
2155 * out-of-tree driver).
2156 */
d6cade0f
SK
2157 rx_done = velocity_rx_srv(vptr, budget / 2);
2158 velocity_tx_srv(vptr);
2159 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2160 velocity_tx_srv(vptr);
dfff7144
SK
2161
2162 /* If budget not fully consumed, exit the polling mode */
2163 if (rx_done < budget) {
2164 napi_complete(napi);
2165 mac_enable_int(vptr->mac_regs);
2166 }
3f2e8d9f 2167 spin_unlock_irqrestore(&vptr->lock, flags);
dfff7144
SK
2168
2169 return rx_done;
2170}
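
/*
 * Budget example (illustrative): with a NAPI budget of 64, the first
 * velocity_rx_srv() pass may complete up to 32 packets and the second
 * up to 64 - rx_done, so the two rx sweeps together never exceed the
 * budget while still interleaving two tx sweeps.  Only when
 * rx_done < budget does napi_complete() run and the device interrupt
 * get re-enabled.
 */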

/**
 * velocity_intr - interrupt callback
 * @irq: interrupt number
 * @dev_instance: interrupting device
 *
 * Called whenever an interrupt is generated by the velocity
 * adapter IRQ line. We may not be the source of the interrupt
 * and need to identify initially if we are, and if not exit as
 * efficiently as possible.
 */
static irqreturn_t velocity_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct velocity_info *vptr = netdev_priv(dev);
	u32 isr_status;

	spin_lock(&vptr->lock);
	isr_status = mac_read_isr(vptr->mac_regs);

	/* Not us ? */
	if (isr_status == 0) {
		spin_unlock(&vptr->lock);
		return IRQ_NONE;
	}

	/* Ack the interrupt */
	mac_write_isr(vptr->mac_regs, isr_status);

	if (likely(napi_schedule_prep(&vptr->napi))) {
		mac_disable_int(vptr->mac_regs);
		__napi_schedule(&vptr->napi);
	}

	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
		velocity_error(vptr, isr_status);

	spin_unlock(&vptr->lock);

	return IRQ_HANDLED;
}

/**
 * velocity_open - interface activation callback
 * @dev: network layer device to open
 *
 * Called when the network layer brings the interface up. Returns
 * a negative posix error code on failure, or zero on success.
 *
 * All the ring allocation and set up is done on open for this
 * adapter to minimise memory usage when inactive
 */
static int velocity_open(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	ret = velocity_init_rings(vptr, dev->mtu);
	if (ret < 0)
		goto out;

	/* Ensure chip is running */
	pci_set_power_state(vptr->pdev, PCI_D0);

	velocity_init_registers(vptr, VELOCITY_INIT_COLD);

	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
			  dev->name, dev);
	if (ret < 0) {
		/* Power down the chip */
		pci_set_power_state(vptr->pdev, PCI_D3hot);
		velocity_free_rings(vptr);
		goto out;
	}

	velocity_give_many_rx_descs(vptr);

	mac_enable_int(vptr->mac_regs);
	netif_start_queue(dev);
	napi_enable(&vptr->napi);
	vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
	return ret;
}

/**
 * velocity_shutdown - shut down the chip
 * @vptr: velocity to deactivate
 *
 * Shuts down the internal operations of the velocity and
 * disables interrupts, autopolling, transmit and receive
 */
static void velocity_shutdown(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	mac_disable_int(regs);
	writel(CR0_STOP, &regs->CR0Set);
	writew(0xFFFF, &regs->TDCSRClr);
	writeb(0xFF, &regs->RDCSRClr);
	safe_disable_mii_autopoll(regs);
	mac_clear_isr(regs);
}

/**
 * velocity_change_mtu - MTU change callback
 * @dev: network device
 * @new_mtu: desired MTU
 *
 * Handle requests from the networking layer for MTU change on
 * this interface. It gets called on a change by the network layer.
 * Return zero for success or negative posix error code.
 */
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret = 0;

	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
				vptr->dev->name);
		ret = -EINVAL;
		goto out_0;
	}

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		goto out_0;
	}

	if (dev->mtu != new_mtu) {
		struct velocity_info *tmp_vptr;
		unsigned long flags;
		struct rx_info rx;
		struct tx_info tx;

		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
		if (!tmp_vptr) {
			ret = -ENOMEM;
			goto out_0;
		}

		tmp_vptr->dev = dev;
		tmp_vptr->pdev = vptr->pdev;
		tmp_vptr->options = vptr->options;
		tmp_vptr->tx.numq = vptr->tx.numq;

		ret = velocity_init_rings(tmp_vptr, new_mtu);
		if (ret < 0)
			goto out_free_tmp_vptr_1;

		spin_lock_irqsave(&vptr->lock, flags);

		netif_stop_queue(dev);
		velocity_shutdown(vptr);

		rx = vptr->rx;
		tx = vptr->tx;

		vptr->rx = tmp_vptr->rx;
		vptr->tx = tmp_vptr->tx;

		tmp_vptr->rx = rx;
		tmp_vptr->tx = tx;

		dev->mtu = new_mtu;

		velocity_init_registers(vptr, VELOCITY_INIT_COLD);

		velocity_give_many_rx_descs(vptr);

		mac_enable_int(vptr->mac_regs);
		netif_start_queue(dev);

		spin_unlock_irqrestore(&vptr->lock, flags);

		velocity_free_rings(tmp_vptr);

out_free_tmp_vptr_1:
		kfree(tmp_vptr);
	}
out_0:
	return ret;
}
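
/*
 * The MTU path above uses tmp_vptr purely as a carrier for freshly
 * sized rings: allocate them off-line, swap the rx/tx bookkeeping
 * under the lock, then free the old rings through the shadow.  Sketch
 * of the exchange (names as in the function):
 *
 *	rx = vptr->rx;			tx = vptr->tx;
 *	vptr->rx = tmp_vptr->rx;	vptr->tx = tmp_vptr->tx;
 *	tmp_vptr->rx = rx;		tmp_vptr->tx = tx;
 *	...
 *	velocity_free_rings(tmp_vptr);	-- releases the *old* rings
 */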

/**
 * velocity_mii_ioctl - MII ioctl handler
 * @dev: network device
 * @ifr: the ifreq block for the ioctl
 * @cmd: the command
 *
 * Process MII requests made via ioctl from the network layer. These
 * are used by tools like kudzu to interrogate the link state of the
 * hardware
 */
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned long flags;
	struct mii_ioctl_data *miidata = if_mii(ifr);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
		break;
	case SIOCGMIIREG:
		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
			return -ETIMEDOUT;
		break;
	case SIOCSMIIREG:
		spin_lock_irqsave(&vptr->lock, flags);
		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
		spin_unlock_irqrestore(&vptr->lock, flags);
		check_connection_type(vptr->mac_regs);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * velocity_ioctl - ioctl entry point
 * @dev: network device
 * @rq: interface request ioctl
 * @cmd: command code
 *
 * Called when the user issues an ioctl request to the network
 * device in question. The velocity interface supports MII.
 */
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	/* If we are asked for information and the device is power
	   saving then we need to bring the device back up to talk to it */

	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D0);

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
	case SIOCGMIIREG:	/* Read MII PHY register. */
	case SIOCSMIIREG:	/* Write to MII PHY register. */
		ret = velocity_mii_ioctl(dev, rq, cmd);
		break;

	default:
		ret = -EOPNOTSUPP;
	}
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D3hot);


	return ret;
}

/**
 * velocity_get_stats - statistics callback
 * @dev: network device
 *
 * Callback from the network layer to allow driver statistics
 * to be resynchronized with hardware collected state. In the
 * case of the velocity we need to pull the MIB counters from
 * the hardware into the counters before letting the network
 * layer display them.
 */
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	/* If the hardware is down, don't touch MII */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irq(&vptr->lock);
	velocity_update_hw_mibs(vptr);
	spin_unlock_irq(&vptr->lock);

	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];

//	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
	/* detailed rx_errors: */
//	unsigned long	rx_length_errors;
//	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
//	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
//	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
//	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
//	unsigned long	tx_fifo_errors;

	return &dev->stats;
}

/**
 * velocity_close - close adapter callback
 * @dev: network device
 *
 * Callback from the network layer when the velocity is being
 * deactivated by the network layer
 */
static int velocity_close(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	napi_disable(&vptr->napi);
	netif_stop_queue(dev);
	velocity_shutdown(vptr);

	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
		velocity_get_ip(vptr);
	if (dev->irq != 0)
		free_irq(dev->irq, dev);

	/* Power down the chip */
	pci_set_power_state(vptr->pdev, PCI_D3hot);

	velocity_free_rings(vptr);

	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
	return 0;
}

/**
 * velocity_xmit - transmit packet callback
 * @skb: buffer to transmit
 * @dev: network device
 *
 * Called by the network layer to request a packet is queued to
 * the velocity. Returns zero on success.
 */
static netdev_tx_t velocity_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int qnum = 0;
	struct tx_desc *td_ptr;
	struct velocity_td_info *tdinfo;
	unsigned long flags;
	int pktlen;
	int index, prev;
	int i = 0;

	if (skb_padto(skb, ETH_ZLEN))
		goto out;

	/* The hardware can handle at most 7 memory segments, so merge
	 * the skb if there are more */
	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
			max_t(unsigned int, skb->len, ETH_ZLEN) :
				skb_headlen(skb);

	spin_lock_irqsave(&vptr->lock, flags);

	index = vptr->tx.curr[qnum];
	td_ptr = &(vptr->tx.rings[qnum][index]);
	tdinfo = &(vptr->tx.infos[qnum][index]);

	td_ptr->tdesc1.TCR = TCR0_TIC;
	td_ptr->td_buf[0].size &= ~TD_QUEUE;

	/*
	 * Map the linear network buffer into PCI space and
	 * add it to the transmit ring.
	 */
	tdinfo->skb = skb;
	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
	td_ptr->td_buf[0].pa_high = 0;
	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);

	/* Handle fragments */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
							  frag, 0,
							  skb_frag_size(frag),
							  DMA_TO_DEVICE);

		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
		td_ptr->td_buf[i + 1].pa_high = 0;
		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
	}
	tdinfo->nskb_dma = i + 1;

	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;

	if (vlan_tx_tag_present(skb)) {
		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		td_ptr->tdesc1.TCR |= TCR0_VETAG;
	}

	/*
	 * Handle hardware checksum
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
		else if (ip->protocol == IPPROTO_UDP)
			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
		td_ptr->tdesc1.TCR |= TCR0_IPCK;
	}

	prev = index - 1;
	if (prev < 0)
		prev = vptr->options.numtx - 1;
	td_ptr->tdesc0.len |= OWNED_BY_NIC;
	vptr->tx.used[qnum]++;
	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;

	if (AVAIL_TD(vptr, qnum) < 1)
		netif_stop_queue(dev);

	td_ptr = &(vptr->tx.rings[qnum][prev]);
	td_ptr->td_buf[0].size |= TD_QUEUE;
	mac_tx_queue_wake(vptr->mac_regs, qnum);

	spin_unlock_irqrestore(&vptr->lock, flags);
out:
	return NETDEV_TX_OK;
}

static const struct net_device_ops velocity_netdev_ops = {
	.ndo_open		= velocity_open,
	.ndo_stop		= velocity_close,
	.ndo_start_xmit		= velocity_xmit,
	.ndo_get_stats		= velocity_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= velocity_set_multi,
	.ndo_change_mtu		= velocity_change_mtu,
	.ndo_do_ioctl		= velocity_ioctl,
	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
};

/**
 * velocity_init_info - init private data
 * @pdev: PCI device
 * @vptr: Velocity info
 * @info: Board type
 *
 * Set up the initial velocity_info struct for the device that has been
 * discovered.
 */
static void __devinit velocity_init_info(struct pci_dev *pdev,
					 struct velocity_info *vptr,
					 const struct velocity_info_tbl *info)
{
	memset(vptr, 0, sizeof(struct velocity_info));

	vptr->pdev = pdev;
	vptr->chip_id = info->chip_id;
	vptr->tx.numq = info->txqueue;
	vptr->multicast_limit = MCAM_SIZE;
	spin_lock_init(&vptr->lock);
}

/**
 * velocity_get_pci_info - retrieve PCI info for device
 * @vptr: velocity device
 * @pdev: PCI device it matches
 *
 * Retrieve the PCI configuration space data that interests us from
 * the kernel PCI layer
 */
static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
	vptr->rev_id = pdev->revision;

	pci_set_master(pdev);

	vptr->ioaddr = pci_resource_start(pdev, 0);
	vptr->memaddr = pci_resource_start(pdev, 1);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
		dev_err(&pdev->dev,
			"region #0 is not an I/O resource, aborting.\n");
		return -EINVAL;
	}

	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
		dev_err(&pdev->dev,
			"region #1 is an I/O resource, aborting.\n");
		return -EINVAL;
	}

	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
		dev_err(&pdev->dev, "region #1 is too small.\n");
		return -EINVAL;
	}
	vptr->pdev = pdev;

	return 0;
}

/**
 * velocity_print_info - per driver data
 * @vptr: velocity
 *
 * Print per driver data as the kernel driver finds Velocity
 * hardware
 */
static void __devinit velocity_print_info(struct velocity_info *vptr)
{
	struct net_device *dev = vptr->dev;

	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
	printk(KERN_INFO "%s: Ethernet Address: %pM\n",
		dev->name, dev->dev_addr);
}

static u32 velocity_get_link(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}

/**
 * velocity_found1 - set up discovered velocity card
 * @pdev: PCI device
 * @ent: PCI device table entry that matched
 *
 * Configure a discovered adapter from scratch. Return a negative
 * errno error code on failure paths.
 */
static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int first = 1;
	struct net_device *dev;
	int i;
	const char *drv_string;
	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
	struct velocity_info *vptr;
	struct mac_regs __iomem *regs;
	int ret = -ENOMEM;

	/* FIXME: this driver, like almost all other ethernet drivers,
	 * can support more than MAX_UNITS.
	 */
	if (velocity_nics >= MAX_UNITS) {
		dev_notice(&pdev->dev, "already found %d NICs.\n",
			   velocity_nics);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct velocity_info));
	if (!dev)
		goto out;

	/* Chain it all together */

	SET_NETDEV_DEV(dev, &pdev->dev);
	vptr = netdev_priv(dev);


	if (first) {
		printk(KERN_INFO "%s Ver. %s\n",
			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
		first = 0;
	}

	velocity_init_info(pdev, vptr, info);

	vptr->dev = dev;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err_free_dev;

	dev->irq = pdev->irq;

	ret = velocity_get_pci_info(vptr, pdev);
	if (ret < 0) {
		/* error message already printed */
		goto err_disable;
	}

	ret = pci_request_regions(pdev, VELOCITY_NAME);
	if (ret < 0) {
		dev_err(&pdev->dev, "No PCI resources.\n");
		goto err_disable;
	}

	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
	if (regs == NULL) {
		ret = -EIO;
		goto err_release_res;
	}

	vptr->mac_regs = regs;

	mac_wol_reset(regs);

	dev->base_addr = vptr->ioaddr;

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(&regs->PAR[i]);


	drv_string = dev_driver_string(&pdev->dev);

	velocity_get_options(&vptr->options, velocity_nics, drv_string);

	/*
	 * Mask out the options that cannot be set on this chip
	 */

	vptr->options.flags &= info->flags;

	/*
	 * Enable the chip specified capabilities
	 */

	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);

	vptr->wol_opts = vptr->options.wol_opts;
	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;

	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);

	dev->irq = pdev->irq;
	dev->netdev_ops = &velocity_netdev_ops;
	dev->ethtool_ops = &velocity_ethtool_ops;
	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_iounmap;

	if (!velocity_get_link(dev)) {
		netif_carrier_off(dev);
		vptr->mii_status |= VELOCITY_LINK_FAIL;
	}

	velocity_print_info(vptr);
	pci_set_drvdata(pdev, dev);

	/* and leave the chip powered down */

	pci_set_power_state(pdev, PCI_D3hot);
	velocity_nics++;
out:
	return ret;

err_iounmap:
	iounmap(regs);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_free_dev:
	free_netdev(dev);
	goto out;
}

#ifdef CONFIG_PM
/**
 * wol_calc_crc - WOL CRC
 * @pattern: data pattern
 * @mask_pattern: mask
 *
 * Compute the wake on lan crc hashes for the packet header
 * we are interested in.
 */
static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
{
	u16 crc = 0xFFFF;
	u8 mask;
	int i, j;

	for (i = 0; i < size; i++) {
		mask = mask_pattern[i];

		/* Skip this loop if the mask equals to zero */
		if (mask == 0x00)
			continue;

		for (j = 0; j < 8; j++) {
			if ((mask & 0x01) == 0) {
				mask >>= 1;
				continue;
			}
			mask >>= 1;
			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
		}
	}
	/* Finally, invert the result once to get the correct data */
	crc = ~crc;
	return bitrev32(crc) >> 16;
}

/**
 * velocity_set_wol - set up for wake on lan
 * @vptr: velocity to set WOL status on
 *
 * Set a card up for wake on lan either by unicast or by
 * ARP packet.
 *
 * FIXME: check static buffer is safe here
 */
static int velocity_set_wol(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	enum speed_opt spd_dpx = vptr->options.spd_dpx;
	static u8 buf[256];
	int i;

	static u32 mask_pattern[2][4] = {
		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
	};

	writew(0xFFFF, &regs->WOLCRClr);
	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);

	/*
	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
	 */

	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);

	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
		struct arp_packet *arp = (struct arp_packet *) buf;
		u16 crc;
		memset(buf, 0, sizeof(struct arp_packet) + 7);

		for (i = 0; i < 4; i++)
			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);

		arp->type = htons(ETH_P_ARP);
		arp->ar_op = htons(1);

		memcpy(arp->ar_tip, vptr->ip_addr, 4);

		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
				(u8 *) & mask_pattern[0][0]);

		writew(crc, &regs->PatternCRC[0]);
		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
	}

	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);

	writew(0x0FFF, &regs->WOLSRClr);

	if (spd_dpx == SPD_DPX_1000_FULL)
		goto mac_done;

	if (spd_dpx != SPD_DPX_AUTO)
		goto advertise_done;

	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
	}

	if (vptr->mii_status & VELOCITY_SPEED_1000)
		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);

advertise_done:
	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

	{
		u8 GCR;
		GCR = readb(&regs->CHIPGCR);
		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
		writeb(GCR, &regs->CHIPGCR);
	}

mac_done:
	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
	/* Turn on SWPTAG just before entering power mode */
	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
	/* Go to bed ..... */
	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	return 0;
}

/**
 * velocity_save_context - save registers
 * @vptr: velocity
 * @context: buffer for stored context
 *
 * Retrieve the current configuration from the velocity hardware
 * and stash it in the context structure, for use by the context
 * restore functions. This allows us to save things we need across
 * power down states
 */
static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 i;
	u8 __iomem *ptr = (u8 __iomem *)regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

}

static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(vptr->dev))
		return 0;

	netif_device_detach(vptr->dev);

	spin_lock_irqsave(&vptr->lock, flags);
	pci_save_state(pdev);

	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
		velocity_get_ip(vptr);
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		velocity_set_wol(vptr);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	spin_unlock_irqrestore(&vptr->lock, flags);
	return 0;
}

/**
 * velocity_restore_context - restore registers
 * @vptr: velocity
 * @context: buffer for stored context
 *
 * Reload the register configuration from the velocity context
 * created by velocity_save_context.
 */
static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;
	u8 __iomem *ptr = (u8 __iomem *)regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	/* Just skip cr0 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* Set */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
}

static int velocity_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;
	int i;

	if (!netif_running(vptr->dev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, 0, 0);
	pci_restore_state(pdev);

	mac_wol_reset(vptr->mac_regs);

	spin_lock_irqsave(&vptr->lock, flags);
	velocity_restore_context(vptr, &vptr->context);
	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
	mac_disable_int(vptr->mac_regs);

	velocity_tx_srv(vptr);

	for (i = 0; i < vptr->tx.numq; i++) {
		if (vptr->tx.used[i])
			mac_tx_queue_wake(vptr->mac_regs, i);
	}

	mac_enable_int(vptr->mac_regs);
	spin_unlock_irqrestore(&vptr->lock, flags);
	netif_device_attach(vptr->dev);

	return 0;
}
#endif

/*
 * Definition for our device driver. The PCI layer interface
 * uses this to handle all our card discovery and plugging
 */
static struct pci_driver velocity_driver = {
	.name		= VELOCITY_NAME,
	.id_table	= velocity_id_table,
	.probe		= velocity_found1,
	.remove		= __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
	.suspend	= velocity_suspend,
	.resume		= velocity_resume,
#endif
};


/**
 * velocity_ethtool_up - pre hook for ethtool
 * @dev: network device
 *
 * Called before an ethtool operation. We need to make sure the
 * chip is out of D3 state before we poke at it.
 */
static int velocity_ethtool_up(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D0);
	return 0;
}

/**
 * velocity_ethtool_down - post hook for ethtool
 * @dev: network device
 *
 * Called after an ethtool operation. Restore the chip back to D3
 * state if it isn't running.
 */
static void velocity_ethtool_down(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D3hot);
}

static int velocity_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u32 status;
	status = check_connection_type(vptr->mac_regs);

	cmd->supported = SUPPORTED_TP |
			SUPPORTED_Autoneg |
			SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		cmd->advertising |=
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half |
			ADVERTISED_1000baseT_Full;
	} else {
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_1000_FULL:
			cmd->advertising |= ADVERTISED_1000baseT_Full;
			break;
		case SPD_DPX_100_HALF:
			cmd->advertising |= ADVERTISED_100baseT_Half;
			break;
		case SPD_DPX_100_FULL:
			cmd->advertising |= ADVERTISED_100baseT_Full;
			break;
		case SPD_DPX_10_HALF:
			cmd->advertising |= ADVERTISED_10baseT_Half;
			break;
		case SPD_DPX_10_FULL:
			cmd->advertising |= ADVERTISED_10baseT_Full;
			break;
		default:
			break;
		}
	}

	if (status & VELOCITY_SPEED_1000)
		ethtool_cmd_speed_set(cmd, SPEED_1000);
	else if (status & VELOCITY_SPEED_100)
		ethtool_cmd_speed_set(cmd, SPEED_100);
	else
		ethtool_cmd_speed_set(cmd, SPEED_10);

	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->port = PORT_TP;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;

	if (status & VELOCITY_DUPLEX_FULL)
		cmd->duplex = DUPLEX_FULL;
	else
		cmd->duplex = DUPLEX_HALF;

	return 0;
}
3223
25db0338
DD
3224static int velocity_set_settings(struct net_device *dev,
3225 struct ethtool_cmd *cmd)
2cf71d2e
DJ
3226{
3227 struct velocity_info *vptr = netdev_priv(dev);
25db0338 3228 u32 speed = ethtool_cmd_speed(cmd);
2cf71d2e
DJ
3229 u32 curr_status;
3230 u32 new_status = 0;
3231 int ret = 0;
3232
3233 curr_status = check_connection_type(vptr->mac_regs);
3234 curr_status &= (~VELOCITY_LINK_FAIL);
3235
3236 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
25db0338
DD
3237 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3238 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3239 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
2cf71d2e
DJ
3240 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3241
15419227 3242 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3243 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
2cf71d2e 3244 ret = -EINVAL;
15419227 3245 } else {
3246 enum speed_opt spd_dpx;
3247
3248 if (new_status & VELOCITY_AUTONEG_ENABLE)
3249 spd_dpx = SPD_DPX_AUTO;
3250 else if ((new_status & VELOCITY_SPEED_1000) &&
3251 (new_status & VELOCITY_DUPLEX_FULL)) {
3252 spd_dpx = SPD_DPX_1000_FULL;
3253 } else if (new_status & VELOCITY_SPEED_100)
3254 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3255 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3256 else if (new_status & VELOCITY_SPEED_10)
3257 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3258 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3259 else
3260 return -EOPNOTSUPP;
3261
3262 vptr->options.spd_dpx = spd_dpx;
3263
2cf71d2e 3264 velocity_set_media_mode(vptr, new_status);
15419227 3265 }
2cf71d2e
DJ
3266
3267 return ret;
3268}

static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct velocity_info *vptr = netdev_priv(dev);
	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
}

static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = netdev_priv(dev);
	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
	wol->wolopts |= WAKE_MAGIC;
	/*
	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
		   wol.wolopts|=WAKE_PHY;
	 */
	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		wol->wolopts |= WAKE_UCAST;
	if (vptr->wol_opts & VELOCITY_WOL_ARP)
		wol->wolopts |= WAKE_ARP;
	memcpy(&wol->sopass, vptr->wol_passwd, 6);
}

static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = netdev_priv(dev);

	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
		return -EFAULT;
	vptr->wol_opts = VELOCITY_WOL_MAGIC;

	/*
	   if (wol.wolopts & WAKE_PHY) {
		   vptr->wol_opts|=VELOCITY_WOL_PHY;
		   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
	   }
	 */

	if (wol->wolopts & WAKE_MAGIC) {
		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_UCAST) {
		vptr->wol_opts |= VELOCITY_WOL_UCAST;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_ARP) {
		vptr->wol_opts |= VELOCITY_WOL_ARP;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	memcpy(vptr->wol_passwd, wol->sopass, 6);
	return 0;
}

static u32 velocity_get_msglevel(struct net_device *dev)
{
	return msglevel;
}

static void velocity_set_msglevel(struct net_device *dev, u32 value)
{
	msglevel = value;
}

static int get_pending_timer_val(int val)
{
	int mult_bits = val >> 6;
	int mult = 1;

	switch (mult_bits) {
	case 1:
		mult = 4; break;
	case 2:
		mult = 16; break;
	case 3:
		mult = 64; break;
	case 0:
	default:
		break;
	}

	return (val & 0x3f) * mult;
}

static void set_pending_timer_val(int *val, u32 us)
{
	u8 mult = 0;
	u8 shift = 0;

	if (us >= 0x3f) {
		mult = 1; /* mult with 4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2; /* mult with 16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3; /* mult with 64 */
		shift = 6;
	}

	*val = (mult << 6) | ((us >> shift) & 0x3f);
}
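
/*
 * Worked example of the timer encoding (illustrative): the register
 * value holds a 2-bit multiplier select (x1/x4/x16/x64) in bits 7:6
 * and a 6-bit count in bits 5:0.  Encoding 100us picks the x4 step
 * because 100 >= 0x3f:
 *
 *	set_pending_timer_val(&val, 100);
 *	val == (1 << 6) | (100 >> 2)	== 0x59
 *	get_pending_timer_val(0x59)	== (0x59 & 0x3f) * 4 == 100
 *
 * Values that are not a multiple of the selected step are rounded
 * down by the shift.
 */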

static int velocity_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ecmd)
{
	struct velocity_info *vptr = netdev_priv(dev);

	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;

	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);

	return 0;
}

static int velocity_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ecmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int max_us = 0x3f * 64;
	unsigned long flags;

	/* The timers hold a 6-bit count and a multiplier of at most
	   64, so 0x3f * 64 microseconds is the upper bound */
	if (ecmd->tx_coalesce_usecs > max_us)
		return -EINVAL;
	if (ecmd->rx_coalesce_usecs > max_us)
		return -EINVAL;

	if (ecmd->tx_max_coalesced_frames > 0xff)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames > 0xff)
		return -EINVAL;

	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;

	set_pending_timer_val(&vptr->options.rxqueue_timer,
			ecmd->rx_coalesce_usecs);
	set_pending_timer_val(&vptr->options.txqueue_timer,
			ecmd->tx_coalesce_usecs);

	/* Setup the interrupt suppression and queue timers */
	spin_lock_irqsave(&vptr->lock, flags);
	mac_disable_int(vptr->mac_regs);
	setup_adaptive_interrupts(vptr);
	setup_queue_timers(vptr);

	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
	mac_clear_isr(vptr->mac_regs);
	mac_enable_int(vptr->mac_regs);
	spin_unlock_irqrestore(&vptr->lock, flags);

	return 0;
}
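
/*
 * Usage sketch (illustrative): the limits enforced above correspond to
 * the generic ethtool coalescing knobs, e.g. from user space:
 *
 *	ethtool -C eth0 rx-usecs 100 tx-frames 16
 *
 * rx-usecs/tx-usecs are capped at 0x3f * 64 (4032) microseconds and
 * the frame counts at 0xff, matching the -EINVAL checks.
 */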
3431
ad66fa7a 3432static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3433 "rx_all",
3434 "rx_ok",
3435 "tx_ok",
3436 "rx_error",
3437 "rx_runt_ok",
3438 "rx_runt_err",
3439 "rx_64",
3440 "tx_64",
3441 "rx_65_to_127",
3442 "tx_65_to_127",
3443 "rx_128_to_255",
3444 "tx_128_to_255",
3445 "rx_256_to_511",
3446 "tx_256_to_511",
3447 "rx_512_to_1023",
3448 "tx_512_to_1023",
3449 "rx_1024_to_1518",
3450 "tx_1024_to_1518",
3451 "tx_ether_collisions",
3452 "rx_crc_errors",
3453 "rx_jumbo",
3454 "tx_jumbo",
3455 "rx_mac_control_frames",
3456 "tx_mac_control_frames",
3457 "rx_frame_alignement_errors",
3458 "rx_long_ok",
3459 "rx_long_err",
3460 "tx_sqe_errors",
3461 "rx_no_buf",
3462 "rx_symbol_errors",
3463 "in_range_length_errors",
3464 "late_collisions"
3465};

static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
		break;
	}
}

static int velocity_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(velocity_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void velocity_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	if (netif_running(dev)) {
		struct velocity_info *vptr = netdev_priv(dev);
		u32 *p = vptr->mib_counter;
		int i;

		spin_lock_irq(&vptr->lock);
		velocity_update_hw_mibs(vptr);
		spin_unlock_irq(&vptr->lock);

		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
			*data++ = *p++;
	}
}
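
/*
 * Example (illustrative; the interface name is an assumption):
 * "ethtool -S eth0" reports these counters. The statistics are only
 * refreshed from the hardware MIBs while the interface is running;
 * on a downed interface the data array is left untouched.
 */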

static const struct ethtool_ops velocity_ethtool_ops = {
	.get_settings = velocity_get_settings,
	.set_settings = velocity_set_settings,
	.get_drvinfo = velocity_get_drvinfo,
	.get_wol = velocity_ethtool_get_wol,
	.set_wol = velocity_ethtool_set_wol,
	.get_msglevel = velocity_get_msglevel,
	.set_msglevel = velocity_set_msglevel,
	.get_link = velocity_get_link,
	.get_strings = velocity_get_strings,
	.get_sset_count = velocity_get_sset_count,
	.get_ethtool_stats = velocity_get_ethtool_stats,
	.get_coalesce = velocity_get_coalesce,
	.set_coalesce = velocity_set_coalesce,
	.begin = velocity_ethtool_up,
	.complete = velocity_ethtool_down
};

#if defined(CONFIG_PM) && defined(CONFIG_INET)
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	if (dev_net(dev) == &init_net &&
	    dev->netdev_ops == &velocity_netdev_ops)
		velocity_get_ip(netdev_priv(dev));

	return NOTIFY_DONE;
}
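
/*
 * The notifier above fires on IPv4 address changes; for velocity
 * devices in the initial namespace it refreshes the address that
 * velocity_get_ip() caches (presumably for use across power
 * management transitions, which is why this is compiled only
 * under CONFIG_PM).
 */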

static struct notifier_block velocity_inetaddr_notifier = {
	.notifier_call = velocity_netdev_event,
};

static void velocity_register_notifier(void)
{
	register_inetaddr_notifier(&velocity_inetaddr_notifier);
}

static void velocity_unregister_notifier(void)
{
	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
}

#else

#define velocity_register_notifier()	do {} while (0)
#define velocity_unregister_notifier()	do {} while (0)

#endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */

/**
 * velocity_init_module - load time function
 *
 * Called when the velocity module is loaded. The PCI driver
 * is registered with the PCI layer, and in turn will call
 * the probe functions for each velocity adapter installed
 * in the system.
 */
static int __init velocity_init_module(void)
{
	int ret;

	velocity_register_notifier();
	ret = pci_register_driver(&velocity_driver);
	if (ret < 0)
		velocity_unregister_notifier();
	return ret;
}

/**
 * velocity_cleanup_module - module unload
 *
 * Called when the velocity module is unloaded. It unregisters
 * the inetaddr notifier and then the PCI driver interface for
 * this hardware, which in turn cleans up all discovered
 * interfaces before returning.
 */
static void __exit velocity_cleanup_module(void)
{
	velocity_unregister_notifier();
	pci_unregister_driver(&velocity_driver);
}

module_init(velocity_init_module);
module_exit(velocity_cleanup_module);