net: makes skb_splice_bits() aware of skb->head_frag
[deliverable/linux.git] / drivers / net / ethernet / oki-semi / pch_gbe / pch_gbe_main.c
CommitLineData
77555ee7
MO
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
1a0bdadb 3 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
77555ee7
MO
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_api.h"
9d9779e7 23#include <linux/module.h>
1a0bdadb
TS
24#ifdef CONFIG_PCH_PTP
25#include <linux/net_tstamp.h>
26#include <linux/ptp_classify.h>
27#endif
77555ee7
MO
28
29#define DRV_VERSION "1.00"
30const char pch_driver_version[] = DRV_VERSION;
31
32#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
33#define PCH_GBE_MAR_ENTRIES 16
34#define PCH_GBE_SHORT_PKT 64
35#define DSC_INIT16 0xC000
36#define PCH_GBE_DMA_ALIGN 0
ac096642 37#define PCH_GBE_DMA_PADDING 2
77555ee7
MO
38#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
39#define PCH_GBE_COPYBREAK_DEFAULT 256
40#define PCH_GBE_PCI_BAR 1
124d770a 41#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
77555ee7 42
b0e6baf5
T
43/* Macros for ML7223 */
44#define PCI_VENDOR_ID_ROHM 0x10db
45#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
46
7756332f
TO
47/* Macros for ML7831 */
48#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
49
77555ee7
MO
50#define PCH_GBE_TX_WEIGHT 64
51#define PCH_GBE_RX_WEIGHT 64
52#define PCH_GBE_RX_BUFFER_WRITE 16
53
54/* Initialize the wake-on-LAN settings */
55#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
56
57#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
58 PCH_GBE_CHIP_TYPE_INTERNAL | \
ce3dad0f 59 PCH_GBE_RGMII_MODE_RGMII \
77555ee7
MO
60 )
61
62/* Ethertype field values */
124d770a 63#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
77555ee7
MO
64#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
65#define PCH_GBE_FRAME_SIZE_2048 2048
66#define PCH_GBE_FRAME_SIZE_4096 4096
67#define PCH_GBE_FRAME_SIZE_8192 8192
68
69#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
70#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
71#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
72#define PCH_GBE_DESC_UNUSED(R) \
73 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
74 (R)->next_to_clean - (R)->next_to_use - 1)
75
76/* Pause packet value */
77#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
78#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
81
77555ee7
MO
82
83/* This defines the bits that are set in the Interrupt Mask
84 * Set/Read Register. Each bit is documented below:
85 * o RXT0 = Receiver Timer Interrupt (ring 0)
86 * o TXDW = Transmit Descriptor Written Back
87 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
88 * o RXSEQ = Receive Sequence Error
89 * o LSC = Link Status Change
90 */
91#define PCH_GBE_INT_ENABLE_MASK ( \
92 PCH_GBE_INT_RX_DMA_CMPLT | \
93 PCH_GBE_INT_RX_DSC_EMP | \
124d770a 94 PCH_GBE_INT_RX_FIFO_ERR | \
77555ee7
MO
95 PCH_GBE_INT_WOL_DET | \
96 PCH_GBE_INT_TX_CMPLT \
97 )
98
124d770a 99#define PCH_GBE_INT_DISABLE_ALL 0
77555ee7 100
1a0bdadb
TS
101#ifdef CONFIG_PCH_PTP
102/* Macros for ieee1588 */
1a0bdadb
TS
103/* 0x40 Time Synchronization Channel Control Register Bits */
104#define MASTER_MODE (1<<0)
93c8acb5 105#define SLAVE_MODE (0)
1a0bdadb 106#define V2_MODE (1<<31)
93c8acb5 107#define CAP_MODE0 (0)
1a0bdadb
TS
108#define CAP_MODE2 (1<<17)
109
110/* 0x44 Time Synchronization Channel Event Register Bits */
111#define TX_SNAPSHOT_LOCKED (1<<0)
112#define RX_SNAPSHOT_LOCKED (1<<1)
358dfb6d
TS
113
114#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
1a0bdadb
TS
116#endif
117
77555ee7
MO
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119
191cc687 120static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122 int data);
98200ec2 123
1a0bdadb
TS
124#ifdef CONFIG_PCH_PTP
125static struct sock_filter ptp_filter[] = {
126 PTP_FILTER
127};
128
/**
 * pch_ptp_match - check whether an skb is the PTP event frame the HW latched
 * @skb:    Received socket buffer
 * @uid_hi: High 16 bits of the source UUID latched by the IEEE 1588 block
 * @uid_lo: Low 32 bits of the source UUID latched by the IEEE 1588 block
 * @seqid:  Sequence ID latched by the IEEE 1588 block
 *
 * Returns nonzero when the packet's PTP source UUID and sequence ID match
 * the values captured by the time-stamp unit, 0 otherwise (including when
 * the packet is not a PTP event message or is too short).
 */
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	/* BPF classifier decides whether this is a PTP event packet at all */
	if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
		return 0;

	/* PTP header follows the Ethernet/IPv4/UDP headers (L4 transport) */
	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	/* memcpy to avoid an unaligned 32-bit load from packet data */
	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&
		uid_lo == lo &&
		seqid == *id);
}
153
93c8acb5
TS
/**
 * pch_rx_timestamp - attach the HW receive time stamp to a matching skb
 * @adapter: Board private structure
 * @skb:     Received socket buffer
 *
 * If RX time stamping is enabled and the IEEE 1588 block has latched a
 * receive snapshot whose UUID/sequence ID match this packet, copy the
 * nanosecond time stamp into the skb's shared hwtstamps.  The latched
 * event is always cleared so the unit can capture the next packet.
 */
static void
pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	/* nothing latched -> nothing to stamp */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	/* the high register packs UUID (low half) and sequence ID (high half) */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* re-arm the snapshot unit whether or not the packet matched */
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}
191
93c8acb5
TS
/**
 * pch_tx_timestamp - report the HW transmit time stamp for an skb
 * @adapter: Board private structure
 * @skb:     Transmitted socket buffer
 *
 * When the skb requests a HW time stamp and TX time stamping is enabled,
 * busy-poll the IEEE 1588 block (up to ~100us) for the transmit snapshot
 * and deliver it to the socket via skb_tstamp_tx().  On poll timeout the
 * IN_PROGRESS flag is cleared again and no stamp is reported.
 */
static void
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* timed out: undo IN_PROGRESS so the stack does not wait */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* clear the latched event to re-arm the snapshot unit */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
232
/**
 * hwtstamp_ioctl - SIOCSHWTSTAMP handler: configure HW time stamping
 * @netdev: Network interface device structure
 * @ifr:    ifreq carrying a userspace struct hwtstamp_config
 * @cmd:    ioctl command (unused here)
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL for
 * reserved flags, -ERANGE for unsupported tx_type/rx_filter values.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;
	u8 station[20];

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		/* program the well-known PTP L4 multicast station address */
		strcpy(station, PTP_L4_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
		/* program the well-known PTP L2 multicast station address */
		strcpy(station, PTP_L2_MULTICAST_SA);
		pch_set_station_address(station, pdev);
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
293#endif
294
98200ec2
TO
/* Kick the MAC to reload its station address (MAC_ADDR_LOAD strobe). */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
299
77555ee7
MO
/**
 * pch_gbe_mac_read_mac_addr - Read MAC address
 * @hw:	Pointer to the HW structure
 *
 * Reads the station address from the first MAC address register pair
 * (mac_adr[0].high holds bytes 0-3, .low holds bytes 4-5, little endian)
 * into hw->mac.addr.
 * Returns
 *	0:	Successful.
 */
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
	u32  adr1a, adr1b;

	adr1a = ioread32(&hw->reg->mac_adr[0].high);
	adr1b = ioread32(&hw->reg->mac_adr[0].low);

	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);

	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
	return 0;
}
323
/**
 * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:	Pointer of register
 * @bit:	Busy bit mask to wait on
 *
 * Spins (up to 1000 iterations of cpu_relax) until @bit clears in @reg;
 * logs an error on timeout.  Process-context variant (no fixed delay).
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}
124d770a
TO
339
/**
 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
 * @reg:	Pointer of register
 * @bit:	Busy bit mask to wait on
 *
 * Bounded wait (20 x 5us) suitable for IRQ context.
 * Returns 0 when the bit cleared, -1 on timeout.
 */
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
	u32 tmp;
	int ret = -1;
	/* wait busy */
	tmp = 20;
	while ((ioread32(reg) & bit) && --tmp)
		udelay(5);
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
	else
		ret = 0;
	return ret;
}
359
77555ee7
MO
/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:	    Pointer to the HW structure
 * @addr:   Pointer to the MAC address (6 bytes, network byte order)
 * @index:  MAC address array register
 *
 * The entry is masked (disabled) via ADDR_MASK while the two address
 * words are written, then unmasked again; each mask change must be
 * followed by a busy-wait before the HW accepts further writes.
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address 1A/1B register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
392
eefc48b0
TS
/**
 * pch_gbe_mac_save_mac_addr_regs - Save a MAC address register pair
 * @hw:	     Pointer to the HW structure
 * @mac_adr: Out: snapshot of the high/low address words
 * @index:   MAC address array register to read
 *
 * Used to preserve the extra unicast/multicast entries across a reset.
 */
static void
pch_gbe_mac_save_mac_addr_regs(struct pch_gbe_hw *hw,
			struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
{
	mac_adr->high = ioread32(&hw->reg->mac_adr[index].high);
	mac_adr->low = ioread32(&hw->reg->mac_adr[index].low);
}
406
/**
 * pch_gbe_mac_store_mac_addr_regs - Restore a MAC address register pair
 * @hw:	     Pointer to the HW structure
 * @mac_adr: Saved high/low address words to write back
 * @index:   MAC address array register to write
 *
 * Mirrors pch_gbe_mac_mar_set(): the entry is masked while being
 * rewritten, with the required busy-waits around each mask change.
 */
static void
pch_gbe_mac_store_mac_addr_regs(struct pch_gbe_hw *hw,
			struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
{
	u32 adrmask;

	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address xA/xB register */
	iowrite32(mac_adr->high, &hw->reg->mac_adr[index].high);
	iowrite32(mac_adr->low, &hw->reg->mac_adr[index].low);
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
430
431#define MAC_ADDR_LIST_NUM 16
77555ee7
MO
/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw:	Pointer to the HW structure
 *
 * Full MAC reset (PCH_GBE_ALL_RST).  The reset wipes the address
 * registers, so all MAC_ADDR_LIST_NUM entries are saved beforehand
 * and written back once the reset bit clears.
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
	int i;

	/* Read the MAC address. and store to the private data */
	pch_gbe_mac_read_mac_addr(hw);
	/* Read other MAC addresses */
	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
		pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	/* the MODE register is also cleared by the reset; re-select GMII */
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive addresses */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
		pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
	return;
}
457
124d770a
TO
/**
 * pch_gbe_mac_reset_rx - Reset only the receive side of the MAC
 * @hw:	Pointer to the HW structure
 *
 * Like pch_gbe_mac_reset_hw() but asserts PCH_GBE_RX_RST only and uses
 * the IRQ-safe busy-wait, so it may be called from interrupt context.
 * The address registers are saved and restored around the reset.
 */
static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
	struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
	int i;

	/* Read the MAC addresses. and store to the private data */
	pch_gbe_mac_read_mac_addr(hw);
	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
		pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
	/* Setup the MAC addresses */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
		pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
	return;
}
475
77555ee7
MO
/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
 * @hw:	Pointer to the HW structure
 * @mar_count:	Receive address registers
 *
 * Programs the station address into entry 0, zeroes all other entries,
 * then enables only entry 0 via ADDR_MASK (0xFFFE = all but bit 0 masked).
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
497
498
/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:	            Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program (ETH_ALEN each)
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			/* advance to the next 6-byte address */
			mc_addr_list += ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
					&hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}
535
/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw:	Pointer to the HW structure
 *
 * Applies mac->fc to the RX flow-control enable bit and records whether
 * TX pause frames should be generated (mac->tx_fc_enable).  Flow control
 * is forced off on half-duplex links.
 * Returns
 *	0:			Successful.
 *	Negative value:		Failed.
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	pr_debug("mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		pr_err("Flow control param set incorrectly\n");
		return -EINVAL;
	}
	/* pause-based flow control is only meaningful on full duplex */
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}
580
/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event bits; 0 disables WoL entirely
 *
 * Enabling copies the current ADDR_MASK into the WoL address mask,
 * clears stale WoL status, programs the event mask with WOL mode on,
 * and re-enables the interrupt mask so the wake event can fire.
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}
609
/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation. (Write or Read)
 * @reg:  Access register of PHY
 * @data: Write data.
 *
 * Serialized by hw->miim_lock.  Waits (bounded) for the MIIM unit to be
 * ready, issues the transaction, then polls for completion.
 *
 * Returns: Read data, or 0 on timeout (no way to report the error).
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
655
/**
 * pch_gbe_mac_set_pause_packet - Set pause packet
 * @hw:	Pointer to the HW structure
 *
 * Builds the IEEE 802.3x pause frame template from fixed header words
 * and the station MAC address (packed little-endian across the PAUSE_PKT
 * registers), then requests transmission of one pause packet.
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
		 ioread32(&hw->reg->PAUSE_PKT5));

	return;
}
690
691
692/**
693 * pch_gbe_alloc_queues - Allocate memory for all rings
694 * @adapter: Board private structure to initialize
695 * Returns
696 * 0: Successfully
697 * Negative value: Failed
698 */
699static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
700{
701 int size;
702
703 size = (int)sizeof(struct pch_gbe_tx_ring);
704 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
705 if (!adapter->tx_ring)
706 return -ENOMEM;
707 size = (int)sizeof(struct pch_gbe_rx_ring);
708 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
709 if (!adapter->rx_ring) {
710 kfree(adapter->tx_ring);
711 return -ENOMEM;
712 }
713 return 0;
714}
715
716/**
717 * pch_gbe_init_stats - Initialize status
718 * @adapter: Board private structure to initialize
719 */
720static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
721{
722 memset(&adapter->stats, 0, sizeof(adapter->stats));
723 return;
724}
725
/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter: Board private structure to initialize
 *
 * Probes MDIO addresses (trying 1 first, then 0, then 2..31) for a
 * responding PHY, isolates all other addresses, and fills in the mii_if
 * callbacks.
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		/* BMSR is read twice: some latched bits only reflect the
		 * current state on the second read */
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
	/* NOTE(review): this assumes PCH_GBE_PHY_REGS_LEN == 32; the
	 * "no PHY found" check should arguably compare against the same
	 * macro as the loop bound -- confirm the macro's value */
	if (addr == 32)
		return -EAGAIN;
	/* Selected the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}
773
774/**
775 * pch_gbe_mdio_read - The read function for mii
776 * @netdev: Network interface device structure
777 * @addr: Phy ID
778 * @reg: Access location
779 * Returns
780 * 0: Successfully
781 * Negative value: Failed
782 */
191cc687 783static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
77555ee7
MO
784{
785 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
786 struct pch_gbe_hw *hw = &adapter->hw;
787
788 return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
789 (u16) 0);
790}
791
/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID (not used)
 * @reg:    Access location
 * @data:   Write data
 *
 * Thin wrapper translating the mii_if_info write callback into a MIIM
 * write transaction.
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}
807
/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work: Pointer of board private structure
 *
 * Workqueue handler; takes the RTNL lock so the down/up sequence cannot
 * race with concurrent netdev configuration changes.
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}
821
/**
 * pch_gbe_reinit_locked- Re-initialization
 * @adapter: Board private structure
 *
 * Bounces the interface.  Caller must hold the RTNL lock.
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}
831
/**
 * pch_gbe_reset - Reset GbE
 * @adapter: Board private structure
 *
 * Resets the MAC, reprograms the receive address registers and runs the
 * HAL hardware init; only logs on HAL failure (callers cannot recover).
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}
844
/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter: Board private structure
 *
 * Releases the IRQ line and, if MSI was enabled at request time,
 * disables MSI on the PCI device again.
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}
859
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 *
 * Bumps irq_sem (paired with pch_gbe_irq_enable), masks all interrupt
 * sources, flushes the write by reading INT_ST, and waits for any
 * in-flight handler to finish.
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	/* read-back flushes the posted write before synchronize_irq() */
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
875
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 *
 * Re-enables interrupts only when every matching pch_gbe_irq_disable()
 * call has been balanced (irq_sem reaches zero).
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	/* read-back flushes the posted write */
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
889
890
891
/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter: Board private structure
 *
 * Programs the TX_MODE thresholds/options and enables TCP/IP checksum
 * offload acceleration for transmit.
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
914
/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: Board private structure
 *
 * Programs the TX descriptor ring base/size registers and enables the
 * transmit DMA engine.
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	/* HW expects the size minus one descriptor's worth (0x10 bytes) */
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}
940
/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter: Board private structure
 *
 * Programs RX_MODE filtering/thresholds and disables RX TCP/IP checksum
 * acceleration (checksums are handled elsewhere for this HW).
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
962
/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: Board private structure
 *
 * Applies the flow-control settings, quiesces the receive MAC and RX
 * DMA engine, then programs the RX descriptor ring base/size registers.
 * (RX is re-enabled later, once buffers are posted.)
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}
999
1000/**
1001 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
1002 * @adapter: Board private structure
1003 * @buffer_info: Buffer information structure
1004 */
1005static void pch_gbe_unmap_and_free_tx_resource(
1006 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
1007{
1008 if (buffer_info->mapped) {
1009 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1010 buffer_info->length, DMA_TO_DEVICE);
1011 buffer_info->mapped = false;
1012 }
1013 if (buffer_info->skb) {
1014 dev_kfree_skb_any(buffer_info->skb);
1015 buffer_info->skb = NULL;
1016 }
1017}
1018
1019/**
1020 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
1021 * @adapter: Board private structure
1022 * @buffer_info: Buffer information structure
1023 */
1024static void pch_gbe_unmap_and_free_rx_resource(
1025 struct pch_gbe_adapter *adapter,
1026 struct pch_gbe_buffer *buffer_info)
1027{
1028 if (buffer_info->mapped) {
1029 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1030 buffer_info->length, DMA_FROM_DEVICE);
1031 buffer_info->mapped = false;
1032 }
1033 if (buffer_info->skb) {
1034 dev_kfree_skb_any(buffer_info->skb);
1035 buffer_info->skb = NULL;
1036 }
1037}
1038
/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter:  Board private structure
 * @tx_ring:  Ring to be cleaned
 *
 * Releases every per-descriptor skb and DMA mapping, zeroes the software
 * bookkeeping array and the descriptor memory, then reprograms the
 * hardware base/size registers so the ring restarts from slot 0.
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				   struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	/* Reset the software bookkeeping for every slot */
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	/* Re-arm the hardware with the ring base and (size - 0x10) length */
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}
1069
/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter:  Board private structure
 * @rx_ring:  Ring to free buffers from
 *
 * Releases every per-descriptor skb and DMA mapping, zeroes the software
 * bookkeeping array and the descriptor memory, then reprograms the
 * hardware base/size registers so the ring restarts from slot 0.
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	/* Reset the software bookkeeping for every slot */
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	/* Re-arm the hardware with the ring base and (size - 0x10) length */
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}
1100
1101static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
1102 u16 duplex)
1103{
1104 struct pch_gbe_hw *hw = &adapter->hw;
1105 unsigned long rgmii = 0;
1106
1107 /* Set the RGMII control. */
1108#ifdef PCH_GBE_MAC_IFOP_RGMII
1109 switch (speed) {
1110 case SPEED_10:
1111 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
1112 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1113 break;
1114 case SPEED_100:
1115 rgmii = (PCH_GBE_RGMII_RATE_25M |
1116 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1117 break;
1118 case SPEED_1000:
1119 rgmii = (PCH_GBE_RGMII_RATE_125M |
1120 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1121 break;
1122 }
1123 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1124#else /* GMII */
1125 rgmii = 0;
1126 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1127#endif
1128}
1129static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1130 u16 duplex)
1131{
1132 struct net_device *netdev = adapter->netdev;
1133 struct pch_gbe_hw *hw = &adapter->hw;
1134 unsigned long mode = 0;
1135
1136 /* Set the communication mode */
1137 switch (speed) {
1138 case SPEED_10:
1139 mode = PCH_GBE_MODE_MII_ETHER;
1140 netdev->tx_queue_len = 10;
1141 break;
1142 case SPEED_100:
1143 mode = PCH_GBE_MODE_MII_ETHER;
1144 netdev->tx_queue_len = 100;
1145 break;
1146 case SPEED_1000:
1147 mode = PCH_GBE_MODE_GMII_ETHER;
1148 break;
1149 }
1150 if (duplex == DUPLEX_FULL)
1151 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1152 else
1153 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1154 iowrite32(mode, &hw->reg->MODE);
1155}
1156
/**
 * pch_gbe_watchdog - Watchdog process
 * @data:  Board private structure (timer callback argument)
 *
 * Periodic timer callback: refreshes statistics and tracks PHY link
 * transitions.  When the link comes up it queries the PHY via the mii
 * library and reprograms the RGMII and MODE registers to match; when the
 * link drops it stops the queue and marks the carrier off.  Re-arms
 * itself every PCH_GBE_WATCHDOG_PERIOD on all paths.
 */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	/* Link just came up: read speed/duplex from the PHY and apply it */
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		/* Restore the queue length saved when the device was opened */
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			/* Could not read the PHY: retry on the next tick */
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		/* Link just went down: park the interface */
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}
1206
/**
 * pch_gbe_tx_queue - Carry out queuing of the transmission data
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
 *
 * Copies @skb into the ring slot's preallocated bounce skb (inserting a
 * 2-byte pad after the Ethernet header), computes TCP/UDP checksums in
 * software for short frames the hardware cannot offload, maps the bounce
 * buffer for DMA and hands the descriptor to the hardware.  @skb is
 * consumed (freed) on success.
 */
static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
			      struct pch_gbe_tx_ring *tx_ring,
			      struct sk_buff *skb)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *tmp_skb;
	unsigned int frame_ctrl;
	unsigned int ring_num;
	unsigned long flags;

	/*-- Set frame control --*/
	frame_ctrl = 0;
	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;	/* HW pads short frames */
	if (skb->ip_summed == CHECKSUM_NONE)
		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;

	/* Performs checksum processing */
	/*
	 * It is because the hardware accelerator does not support a checksum,
	 * when the received data size is less than 64 bytes.
	 */
	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
		/* Disable the offload and checksum this frame in software */
		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			unsigned int offset;
			offset = skb_transport_offset(skb);
			if (iph->protocol == IPPROTO_TCP) {
				skb->csum = 0;
				tcp_hdr(skb)->check = 0;
				skb->csum = skb_checksum(skb, offset,
							 skb->len - offset, 0);
				tcp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_TCP,
							  skb->csum);
			} else if (iph->protocol == IPPROTO_UDP) {
				skb->csum = 0;
				udp_hdr(skb)->check = 0;
				skb->csum =
					skb_checksum(skb, offset,
						     skb->len - offset, 0);
				udp_hdr(skb)->check =
					csum_tcpudp_magic(iph->saddr,
							  iph->daddr,
							  skb->len - offset,
							  IPPROTO_UDP,
							  skb->csum);
			}
		}
	}
	/* Claim the next ring slot; only next_to_use is lock-protected */
	spin_lock_irqsave(&tx_ring->tx_lock, flags);
	ring_num = tx_ring->next_to_use;
	if (unlikely((ring_num + 1) == tx_ring->count))
		tx_ring->next_to_use = 0;
	else
		tx_ring->next_to_use = ring_num + 1;

	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	/* Preallocated bounce skb for this slot (pch_gbe_alloc_tx_buffers) */
	buffer_info = &tx_ring->buffer_info[ring_num];
	tmp_skb = buffer_info->skb;

	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
	tmp_skb->data[ETH_HLEN] = 0x00;
	tmp_skb->data[ETH_HLEN + 1] = 0x00;
	/* NOTE(review): the bounce buffer now holds skb->len + 2 bytes, yet
	 * len/length are set to skb->len — presumably the hardware accounts
	 * for the 2-byte pad; confirm against the datasheet. */
	tmp_skb->len = skb->len;
	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
	       (skb->len - ETH_HLEN));
	/*-- Set Buffer information --*/
	buffer_info->length = tmp_skb->len;
	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
					  buffer_info->length,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		pr_err("TX DMA map failed\n");
		buffer_info->dma = 0;
		buffer_info->time_stamp = 0;
		/* Give the slot back.  NOTE(review): this store is not under
		 * tx_lock, and @skb is not freed on this path — verify the
		 * caller's expectations. */
		tx_ring->next_to_use = ring_num;
		return;
	}
	buffer_info->mapped = true;
	buffer_info->time_stamp = jiffies;

	/*-- Set Tx descriptor --*/
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
	tx_desc->buffer_addr = (buffer_info->dma);
	tx_desc->length = (tmp_skb->len);
	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
	tx_desc->tx_frame_ctrl = (frame_ctrl);
	tx_desc->gbec_status = (DSC_INIT16);

	if (unlikely(++ring_num == tx_ring->count))
		ring_num = 0;

	/* Update software pointer of TX descriptor */
	iowrite32(tx_ring->dma +
		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
		  &hw->reg->TX_DSC_SW_P);

#ifdef CONFIG_PCH_PTP
	pch_tx_timestamp(adapter, skb);
#endif

	/* The frame now lives in the bounce skb; the original is done */
	dev_kfree_skb_any(skb);
}
1325
/**
 * pch_gbe_update_stats - Update the board statistics counters
 * @adapter:  Board private structure
 *
 * Derives the aggregate rx/tx error counters from the individual error
 * counts, then mirrors the driver's private counters into netdev->stats,
 * all under the stats spinlock.  Skipped entirely while the PCI channel
 * is in an error state.
 */
void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw_stats *stats = &adapter->stats;
	unsigned long flags;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* Update device status "adapter->stats" */
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_length_errors +
	    stats->tx_aborted_errors +
	    stats->tx_carrier_errors + stats->tx_timeout_count;

	/* Update network device status "adapter->net_stats" */
	netdev->stats.rx_packets = stats->rx_packets;
	netdev->stats.rx_bytes = stats->rx_bytes;
	netdev->stats.rx_dropped = stats->rx_dropped;
	netdev->stats.tx_packets = stats->tx_packets;
	netdev->stats.tx_bytes = stats->tx_bytes;
	netdev->stats.tx_dropped = stats->tx_dropped;
	/* Fill out the OS statistics structure */
	netdev->stats.multicast = stats->multicast;
	netdev->stats.collisions = stats->collisions;
	/* Rx Errors */
	netdev->stats.rx_errors = stats->rx_errors;
	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
	/* Tx Errors */
	netdev->stats.tx_errors = stats->tx_errors;
	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
1373
/**
 * pch_gbe_stop_receive - Halt the Rx DMA engine and receive MAC
 * @adapter:  Board private structure
 *
 * Disables Rx DMA, waits for the DMA engine to go idle, then resets the
 * receive MAC.  If the engine never reaches idle, bus mastering is
 * temporarily disabled around the MAC reset to force the transfer off
 * the bus.
 */
static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rxdma;
	u16 value;
	int ret;

	/* Disable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
	/* Wait Rx DMA BUS is IDLE */
	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
	if (ret) {
		/* Timed out: force the issue by dropping bus mastering.
		 * Disable Bus master */
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
		value &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
		/* Enable Bus master */
		value |= PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
	} else {
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
	}
}
1402
5229d87e
TO
1403static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1404{
1405 u32 rxdma;
1406
1407 /* Enables Receive DMA */
1408 rxdma = ioread32(&hw->reg->DMA_CTRL);
1409 rxdma |= PCH_GBE_RX_DMA_EN;
1410 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1411 /* Enables Receive */
1412 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1413 return;
1414}
1415
/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns
 *	- IRQ_HANDLED:	Our interrupt
 *	- IRQ_NONE:	Not our interrupt
 *
 * Counts error interrupts, handles Rx FIFO overrun by stopping reception
 * until NAPI can drain the ring, and schedules NAPI polling for normal
 * Rx/Tx completion (masking those sources until the poll re-enables
 * them).
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status */
	int_st = ioread32(&hw->reg->INT_ST);
	/* Only consider sources that are both raised and enabled */
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		/* First overrun since the last recovery: mask the source,
		 * stop reception, and re-sample the status so the NAPI
		 * scheduling test below sees any newly raised bits. */
		if (!adapter->rx_stop_flag) {
			adapter->stats.intr_rx_fifo_err_count++;
			pr_debug("Rx fifo over run\n");
			adapter->rx_stop_flag = true;
			int_en = ioread32(&hw->reg->INT_EN);
			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
				  &hw->reg->INT_EN);
			pch_gbe_stop_receive(adapter);
			int_st |= ioread32(&hw->reg->INT_ST);
			int_st = int_st & ioread32(&hw->reg->INT_EN);
		}
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_debug("Rx descriptor is empty\n");
		/* Mask the empty interrupt until buffers are replenished */
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
	}

	/* When request status is Receive interruption */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
	    (adapter->rx_stop_flag)) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}
1491
/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter:        Board private structure
 * @rx_ring:        Rx descriptor ring
 * @cleaned_count:  Number of descriptors to refill
 *
 * For each refilled slot this allocates a fresh skb (used later as the
 * copy destination in pch_gbe_clean_rx()) and DMA-maps the slot's
 * preallocated rx_buffer from the pool set up by
 * pch_gbe_alloc_rx_buffers_pool() — the skb itself is NOT the DMA
 * target.  Finally advances the hardware tail pointer.
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->stats.rx_alloc_buff_failed++;
			break;
		}
		/* align */
		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;

		/* Map the pool buffer, not the skb data */
		buffer_info->dma = dma_map_single(&pdev->dev,
						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;	/* hand slot to hardware */

		pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,
			 buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* Hardware tail points at the last filled descriptor,
		 * so step back one (with wrap) before programming it */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
	return;
}
1559
124d770a
TO
1560static int
1561pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1562 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1563{
1564 struct pci_dev *pdev = adapter->pdev;
1565 struct pch_gbe_buffer *buffer_info;
1566 unsigned int i;
1567 unsigned int bufsz;
1568 unsigned int size;
1569
1570 bufsz = adapter->rx_buffer_len;
1571
1572 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1573 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1574 &rx_ring->rx_buff_pool_logic,
1575 GFP_KERNEL);
1576 if (!rx_ring->rx_buff_pool) {
1577 pr_err("Unable to allocate memory for the receive poll buffer\n");
1578 return -ENOMEM;
1579 }
1580 memset(rx_ring->rx_buff_pool, 0, size);
1581 rx_ring->rx_buff_pool_size = size;
1582 for (i = 0; i < rx_ring->count; i++) {
1583 buffer_info = &rx_ring->buffer_info[i];
1584 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1585 buffer_info->length = bufsz;
1586 }
1587 return 0;
1588}
1589
77555ee7
MO
1590/**
1591 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1592 * @adapter: Board private structure
1593 * @tx_ring: Tx descriptor ring
1594 */
1595static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1596 struct pch_gbe_tx_ring *tx_ring)
1597{
1598 struct pch_gbe_buffer *buffer_info;
1599 struct sk_buff *skb;
1600 unsigned int i;
1601 unsigned int bufsz;
1602 struct pch_gbe_tx_desc *tx_desc;
1603
1604 bufsz =
1605 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1606
1607 for (i = 0; i < tx_ring->count; i++) {
1608 buffer_info = &tx_ring->buffer_info[i];
1609 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1610 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1611 buffer_info->skb = skb;
1612 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1613 tx_desc->gbec_status = (DSC_INIT16);
1614 }
1615 return;
1616}
1617
/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 *
 * Walks descriptors the hardware has completed (gbec_status no longer
 * DSC_INIT16), accounts errors/collisions, unmaps DMA and trims the
 * bounce skb for reuse.  Processes at most PCH_GBE_TX_WEIGHT
 * descriptors per call, and wakes the queue when fully cleaned.
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = true;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	/* DSC_INIT16 cleared means the hardware has finished this slot */
	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* Classify the completion status for statistics */
		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
			  ) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
			  ) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
			  ) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			/* Bounce skb is kept and reused; just reset its length */
			pr_debug("trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;	/* return slot to software */
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
			cleaned = false;
			break;
		}
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);
	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}
	spin_lock(&adapter->tx_queue_lock);
	tx_ring->next_to_clean = i;
	spin_unlock(&adapter->tx_queue_lock);
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	return cleaned;
}
1707
/**
 * pch_gbe_clean_rx - Send received data up the network stack; legacy
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 *
 * Walks completed descriptors, copies each frame from the slot's DMA
 * pool buffer into its skb, validates checksums flags and passes the skb
 * to GRO.  Buffers are returned to hardware in batches of
 * PCH_GBE_RX_BUFFER_WRITE via pch_gbe_alloc_rx_buffers().
 */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;	/* descriptor still owned by hardware */
		cleaned = true;
		cleaned_count++;

		/* Snapshot status, then hand the descriptor back */
		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;

		pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x] BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check.
		 * NOTE(review): on these error paths the skb taken above is
		 * neither freed nor reused — looks like a leak; verify. */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3], length includes FCS length */
			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
			if (rx_desc->rx_words_eob & 0x02)
				length = length - 4;
			/*
			 * buffer_info->rx_buffer: [Header:14][payload]
			 * skb->data: [Reserve:2][Header:14][payload]
			 */
			memcpy(skb->data, buffer_info->rx_buffer, length);

			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write meta date of skb */
			skb_put(skb, length);

#ifdef CONFIG_PCH_PTP
			pch_rx_timestamp(adapter, skb);
#endif

			skb->protocol = eth_type_trans(skb, netdev);
			/* NOTE(review): TCPIPOK maps to CHECKSUM_NONE here —
			 * inverted-looking, but kept; confirm the flag's
			 * polarity against the hardware manual. */
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_NONE;
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
1824
1825/**
1826 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1827 * @adapter: Board private structure
1828 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1829 * Returns
1830 * 0: Successfully
1831 * Negative value: Failed
1832 */
1833int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1834 struct pch_gbe_tx_ring *tx_ring)
1835{
1836 struct pci_dev *pdev = adapter->pdev;
1837 struct pch_gbe_tx_desc *tx_desc;
1838 int size;
1839 int desNo;
1840
1841 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
89bf67f1 1842 tx_ring->buffer_info = vzalloc(size);
e404decb 1843 if (!tx_ring->buffer_info)
77555ee7 1844 return -ENOMEM;
77555ee7
MO
1845
1846 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1847
1848 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1849 &tx_ring->dma, GFP_KERNEL);
1850 if (!tx_ring->desc) {
1851 vfree(tx_ring->buffer_info);
1852 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1853 return -ENOMEM;
1854 }
1855 memset(tx_ring->desc, 0, tx_ring->size);
1856
1857 tx_ring->next_to_use = 0;
1858 tx_ring->next_to_clean = 0;
1859 spin_lock_init(&tx_ring->tx_lock);
1860
1861 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1862 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1863 tx_desc->gbec_status = DSC_INIT16;
1864 }
1865 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1866 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1867 tx_ring->desc, (unsigned long long)tx_ring->dma,
1868 tx_ring->next_to_clean, tx_ring->next_to_use);
1869 return 0;
1870}
1871
1872/**
1873 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1874 * @adapter: Board private structure
1875 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1876 * Returns
1877 * 0: Successfully
1878 * Negative value: Failed
1879 */
1880int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1881 struct pch_gbe_rx_ring *rx_ring)
1882{
1883 struct pci_dev *pdev = adapter->pdev;
1884 struct pch_gbe_rx_desc *rx_desc;
1885 int size;
1886 int desNo;
1887
1888 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
89bf67f1 1889 rx_ring->buffer_info = vzalloc(size);
e404decb 1890 if (!rx_ring->buffer_info)
77555ee7 1891 return -ENOMEM;
e404decb 1892
77555ee7
MO
1893 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1894 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1895 &rx_ring->dma, GFP_KERNEL);
1896
1897 if (!rx_ring->desc) {
1898 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1899 vfree(rx_ring->buffer_info);
1900 return -ENOMEM;
1901 }
1902 memset(rx_ring->desc, 0, rx_ring->size);
1903 rx_ring->next_to_clean = 0;
1904 rx_ring->next_to_use = 0;
1905 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1906 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1907 rx_desc->gbec_status = DSC_INIT16;
1908 }
1909 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1910 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1911 rx_ring->desc, (unsigned long long)rx_ring->dma,
1912 rx_ring->next_to_clean, rx_ring->next_to_use);
1913 return 0;
1914}
1915
1916/**
1917 * pch_gbe_free_tx_resources - Free Tx Resources
1918 * @adapter: Board private structure
1919 * @tx_ring: Tx descriptor ring for a specific queue
1920 */
1921void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1922 struct pch_gbe_tx_ring *tx_ring)
1923{
1924 struct pci_dev *pdev = adapter->pdev;
1925
1926 pch_gbe_clean_tx_ring(adapter, tx_ring);
1927 vfree(tx_ring->buffer_info);
1928 tx_ring->buffer_info = NULL;
1929 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1930 tx_ring->desc = NULL;
1931}
1932
1933/**
1934 * pch_gbe_free_rx_resources - Free Rx Resources
1935 * @adapter: Board private structure
1936 * @rx_ring: Ring to clean the resources from
1937 */
1938void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1939 struct pch_gbe_rx_ring *rx_ring)
1940{
1941 struct pci_dev *pdev = adapter->pdev;
1942
1943 pch_gbe_clean_rx_ring(adapter, rx_ring);
1944 vfree(rx_ring->buffer_info);
1945 rx_ring->buffer_info = NULL;
1946 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1947 rx_ring->desc = NULL;
1948}
1949
1950/**
1951 * pch_gbe_request_irq - Allocate an interrupt line
1952 * @adapter: Board private structure
1953 * Returns
1954 * 0: Successfully
1955 * Negative value: Failed
1956 */
1957static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1958{
1959 struct net_device *netdev = adapter->netdev;
1960 int err;
1961 int flags;
1962
1963 flags = IRQF_SHARED;
1964 adapter->have_msi = false;
1965 err = pci_enable_msi(adapter->pdev);
1966 pr_debug("call pci_enable_msi\n");
1967 if (err) {
1968 pr_debug("call pci_enable_msi - Error: %d\n", err);
1969 } else {
1970 flags = 0;
1971 adapter->have_msi = true;
1972 }
1973 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1974 flags, netdev->name, netdev);
1975 if (err)
1976 pr_err("Unable to allocate interrupt Error: %d\n", err);
1977 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1978 adapter->have_msi, flags, err);
1979 return err;
1980}
1981
1982
1983static void pch_gbe_set_multi(struct net_device *netdev);
1984/**
1985 * pch_gbe_up - Up GbE network device
1986 * @adapter: Board private structure
1987 * Returns
1988 * 0: Successfully
1989 * Negative value: Failed
1990 */
1991int pch_gbe_up(struct pch_gbe_adapter *adapter)
1992{
1993 struct net_device *netdev = adapter->netdev;
1994 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1995 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1996 int err;
1997
2b53d078
DH
1998 /* Ensure we have a valid MAC */
1999 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2000 pr_err("Error: Invalid MAC address\n");
2001 return -EINVAL;
2002 }
2003
77555ee7
MO
2004 /* hardware has been reset, we need to reload some things */
2005 pch_gbe_set_multi(netdev);
2006
2007 pch_gbe_setup_tctl(adapter);
2008 pch_gbe_configure_tx(adapter);
2009 pch_gbe_setup_rctl(adapter);
2010 pch_gbe_configure_rx(adapter);
2011
2012 err = pch_gbe_request_irq(adapter);
2013 if (err) {
2014 pr_err("Error: can't bring device up\n");
2015 return err;
2016 }
124d770a
TO
2017 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
2018 if (err) {
2019 pr_err("Error: can't bring device up\n");
2020 return err;
2021 }
77555ee7
MO
2022 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
2023 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
2024 adapter->tx_queue_len = netdev->tx_queue_len;
5229d87e 2025 pch_gbe_start_receive(&adapter->hw);
77555ee7
MO
2026
2027 mod_timer(&adapter->watchdog_timer, jiffies);
2028
2029 napi_enable(&adapter->napi);
2030 pch_gbe_irq_enable(adapter);
2031 netif_start_queue(adapter->netdev);
2032
2033 return 0;
2034}
2035
2036/**
2037 * pch_gbe_down - Down GbE network device
2038 * @adapter: Board private structure
2039 */
2040void pch_gbe_down(struct pch_gbe_adapter *adapter)
2041{
2042 struct net_device *netdev = adapter->netdev;
124d770a 2043 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
77555ee7
MO
2044
2045 /* signal that we're down so the interrupt handler does not
2046 * reschedule our watchdog timer */
2047 napi_disable(&adapter->napi);
2048 atomic_set(&adapter->irq_sem, 0);
2049
2050 pch_gbe_irq_disable(adapter);
2051 pch_gbe_free_irq(adapter);
2052
2053 del_timer_sync(&adapter->watchdog_timer);
2054
2055 netdev->tx_queue_len = adapter->tx_queue_len;
2056 netif_carrier_off(netdev);
2057 netif_stop_queue(netdev);
2058
2059 pch_gbe_reset(adapter);
2060 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
2061 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
124d770a
TO
2062
2063 pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
2064 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
2065 rx_ring->rx_buff_pool_logic = 0;
2066 rx_ring->rx_buff_pool_size = 0;
2067 rx_ring->rx_buff_pool = NULL;
77555ee7
MO
2068}
2069
2070/**
2071 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2072 * @adapter: Board private structure to initialize
2073 * Returns
2074 * 0: Successfully
2075 * Negative value: Failed
2076 */
2077static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2078{
2079 struct pch_gbe_hw *hw = &adapter->hw;
2080 struct net_device *netdev = adapter->netdev;
2081
2082 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2083 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2084 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2085
2086 /* Initialize the hardware-specific values */
2087 if (pch_gbe_hal_setup_init_funcs(hw)) {
2088 pr_err("Hardware Initialization Failure\n");
2089 return -EIO;
2090 }
2091 if (pch_gbe_alloc_queues(adapter)) {
2092 pr_err("Unable to allocate memory for queues\n");
2093 return -ENOMEM;
2094 }
2095 spin_lock_init(&adapter->hw.miim_lock);
2096 spin_lock_init(&adapter->tx_queue_lock);
2097 spin_lock_init(&adapter->stats_lock);
2098 spin_lock_init(&adapter->ethtool_lock);
2099 atomic_set(&adapter->irq_sem, 0);
2100 pch_gbe_irq_disable(adapter);
2101
2102 pch_gbe_init_stats(adapter);
2103
2104 pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
2105 (u32) adapter->rx_buffer_len,
2106 hw->mac.min_frame_size, hw->mac.max_frame_size);
2107 return 0;
2108}
2109
2110/**
2111 * pch_gbe_open - Called when a network interface is made active
2112 * @netdev: Network interface device structure
2113 * Returns
2114 * 0: Successfully
2115 * Negative value: Failed
2116 */
2117static int pch_gbe_open(struct net_device *netdev)
2118{
2119 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2120 struct pch_gbe_hw *hw = &adapter->hw;
2121 int err;
2122
2123 /* allocate transmit descriptors */
2124 err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2125 if (err)
2126 goto err_setup_tx;
2127 /* allocate receive descriptors */
2128 err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2129 if (err)
2130 goto err_setup_rx;
2131 pch_gbe_hal_power_up_phy(hw);
2132 err = pch_gbe_up(adapter);
2133 if (err)
2134 goto err_up;
2135 pr_debug("Success End\n");
2136 return 0;
2137
2138err_up:
2139 if (!adapter->wake_up_evt)
2140 pch_gbe_hal_power_down_phy(hw);
2141 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2142err_setup_rx:
2143 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2144err_setup_tx:
2145 pch_gbe_reset(adapter);
2146 pr_err("Error End\n");
2147 return err;
2148}
2149
2150/**
2151 * pch_gbe_stop - Disables a network interface
2152 * @netdev: Network interface device structure
2153 * Returns
2154 * 0: Successfully
2155 */
2156static int pch_gbe_stop(struct net_device *netdev)
2157{
2158 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2159 struct pch_gbe_hw *hw = &adapter->hw;
2160
2161 pch_gbe_down(adapter);
2162 if (!adapter->wake_up_evt)
2163 pch_gbe_hal_power_down_phy(hw);
2164 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2165 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2166 return 0;
2167}
2168
2169/**
2170 * pch_gbe_xmit_frame - Packet transmitting start
2171 * @skb: Socket buffer structure
2172 * @netdev: Network interface device structure
2173 * Returns
2174 * - NETDEV_TX_OK: Normal end
2175 * - NETDEV_TX_BUSY: Error end
2176 */
2177static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2178{
2179 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2180 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2181 unsigned long flags;
2182
2183 if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
77555ee7
MO
2184 pr_err("Transfer length Error: skb len: %d > max: %d\n",
2185 skb->len, adapter->hw.mac.max_frame_size);
419c2046 2186 dev_kfree_skb_any(skb);
77555ee7
MO
2187 adapter->stats.tx_length_errors++;
2188 return NETDEV_TX_OK;
2189 }
2190 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
2191 /* Collision - tell upper layer to requeue */
2192 return NETDEV_TX_LOCKED;
2193 }
2194 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2195 netif_stop_queue(netdev);
2196 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2197 pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
2198 tx_ring->next_to_use, tx_ring->next_to_clean);
2199 return NETDEV_TX_BUSY;
2200 }
2201 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2202
2203 /* CRC,ITAG no support */
2204 pch_gbe_tx_queue(adapter, tx_ring, skb);
2205 return NETDEV_TX_OK;
2206}
2207
2208/**
2209 * pch_gbe_get_stats - Get System Network Statistics
2210 * @netdev: Network interface device structure
2211 * Returns: The current stats
2212 */
2213static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2214{
2215 /* only return the current stats */
2216 return &netdev->stats;
2217}
2218
2219/**
2220 * pch_gbe_set_multi - Multicast and Promiscuous mode set
2221 * @netdev: Network interface device structure
2222 */
2223static void pch_gbe_set_multi(struct net_device *netdev)
2224{
2225 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2226 struct pch_gbe_hw *hw = &adapter->hw;
2227 struct netdev_hw_addr *ha;
2228 u8 *mta_list;
2229 u32 rctl;
2230 int i;
2231 int mc_count;
2232
2233 pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
2234
2235 /* Check for Promiscuous and All Multicast modes */
2236 rctl = ioread32(&hw->reg->RX_MODE);
2237 mc_count = netdev_mc_count(netdev);
2238 if ((netdev->flags & IFF_PROMISC)) {
2239 rctl &= ~PCH_GBE_ADD_FIL_EN;
2240 rctl &= ~PCH_GBE_MLT_FIL_EN;
2241 } else if ((netdev->flags & IFF_ALLMULTI)) {
2242 /* all the multicasting receive permissions */
2243 rctl |= PCH_GBE_ADD_FIL_EN;
2244 rctl &= ~PCH_GBE_MLT_FIL_EN;
2245 } else {
2246 if (mc_count >= PCH_GBE_MAR_ENTRIES) {
2247 /* all the multicasting receive permissions */
2248 rctl |= PCH_GBE_ADD_FIL_EN;
2249 rctl &= ~PCH_GBE_MLT_FIL_EN;
2250 } else {
2251 rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2252 }
2253 }
2254 iowrite32(rctl, &hw->reg->RX_MODE);
2255
2256 if (mc_count >= PCH_GBE_MAR_ENTRIES)
2257 return;
2258 mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
2259 if (!mta_list)
2260 return;
2261
2262 /* The shared function expects a packed array of only addresses. */
2263 i = 0;
2264 netdev_for_each_mc_addr(ha, netdev) {
2265 if (i == mc_count)
2266 break;
2267 memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
2268 }
2269 pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
2270 PCH_GBE_MAR_ENTRIES);
2271 kfree(mta_list);
2272
2273 pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
2274 ioread32(&hw->reg->RX_MODE), mc_count);
2275}
2276
2277/**
2278 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2279 * @netdev: Network interface device structure
2280 * @addr: Pointer to an address structure
2281 * Returns
2282 * 0: Successfully
2283 * -EADDRNOTAVAIL: Failed
2284 */
2285static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2286{
2287 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2288 struct sockaddr *skaddr = addr;
2289 int ret_val;
2290
2291 if (!is_valid_ether_addr(skaddr->sa_data)) {
2292 ret_val = -EADDRNOTAVAIL;
2293 } else {
2294 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2295 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2296 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2297 ret_val = 0;
2298 }
2299 pr_debug("ret_val : 0x%08x\n", ret_val);
2300 pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2301 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2302 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2303 ioread32(&adapter->hw.reg->mac_adr[0].high),
2304 ioread32(&adapter->hw.reg->mac_adr[0].low));
2305 return ret_val;
2306}
2307
2308/**
2309 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2310 * @netdev: Network interface device structure
2311 * @new_mtu: New value for maximum frame size
2312 * Returns
2313 * 0: Successfully
2314 * -EINVAL: Failed
2315 */
2316static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2317{
2318 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2319 int max_frame;
124d770a
TO
2320 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2321 int err;
77555ee7
MO
2322
2323 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2324 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2325 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2326 pr_err("Invalid MTU setting\n");
2327 return -EINVAL;
2328 }
2329 if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2330 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2331 else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2332 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2333 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2334 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2335 else
124d770a 2336 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
77555ee7 2337
124d770a
TO
2338 if (netif_running(netdev)) {
2339 pch_gbe_down(adapter);
2340 err = pch_gbe_up(adapter);
2341 if (err) {
2342 adapter->rx_buffer_len = old_rx_buffer_len;
2343 pch_gbe_up(adapter);
2344 return -ENOMEM;
2345 } else {
2346 netdev->mtu = new_mtu;
2347 adapter->hw.mac.max_frame_size = max_frame;
2348 }
2349 } else {
77555ee7 2350 pch_gbe_reset(adapter);
124d770a
TO
2351 netdev->mtu = new_mtu;
2352 adapter->hw.mac.max_frame_size = max_frame;
2353 }
77555ee7
MO
2354
2355 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2356 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2357 adapter->hw.mac.max_frame_size);
2358 return 0;
2359}
2360
756a6b03
MM
2361/**
2362 * pch_gbe_set_features - Reset device after features changed
2363 * @netdev: Network interface device structure
2364 * @features: New features
2365 * Returns
2366 * 0: HW state updated successfully
2367 */
c8f44aff
MM
2368static int pch_gbe_set_features(struct net_device *netdev,
2369 netdev_features_t features)
756a6b03
MM
2370{
2371 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
c8f44aff 2372 netdev_features_t changed = features ^ netdev->features;
756a6b03
MM
2373
2374 if (!(changed & NETIF_F_RXCSUM))
2375 return 0;
2376
2377 if (netif_running(netdev))
2378 pch_gbe_reinit_locked(adapter);
2379 else
2380 pch_gbe_reset(adapter);
2381
2382 return 0;
2383}
2384
77555ee7
MO
2385/**
2386 * pch_gbe_ioctl - Controls register through a MII interface
2387 * @netdev: Network interface device structure
2388 * @ifr: Pointer to ifr structure
2389 * @cmd: Control command
2390 * Returns
2391 * 0: Successfully
2392 * Negative value: Failed
2393 */
2394static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2395{
2396 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2397
2398 pr_debug("cmd : 0x%04x\n", cmd);
2399
1a0bdadb
TS
2400#ifdef CONFIG_PCH_PTP
2401 if (cmd == SIOCSHWTSTAMP)
2402 return hwtstamp_ioctl(netdev, ifr, cmd);
2403#endif
2404
77555ee7
MO
2405 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2406}
2407
2408/**
2409 * pch_gbe_tx_timeout - Respond to a Tx Hang
2410 * @netdev: Network interface device structure
2411 */
2412static void pch_gbe_tx_timeout(struct net_device *netdev)
2413{
2414 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2415
2416 /* Do the reset outside of interrupt context */
2417 adapter->stats.tx_timeout_count++;
2418 schedule_work(&adapter->reset_task);
2419}
2420
2421/**
2422 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2423 * @napi: Pointer of polling device struct
2424 * @budget: The maximum number of a packet
2425 * Returns
2426 * false: Exit the polling mode
2427 * true: Continue the polling mode
2428 */
2429static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2430{
2431 struct pch_gbe_adapter *adapter =
2432 container_of(napi, struct pch_gbe_adapter, napi);
77555ee7
MO
2433 int work_done = 0;
2434 bool poll_end_flag = false;
2435 bool cleaned = false;
124d770a 2436 u32 int_en;
77555ee7
MO
2437
2438 pr_debug("budget : %d\n", budget);
2439
805e969f
TO
2440 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2441 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2442
2443 if (!cleaned)
2444 work_done = budget;
2445 /* If no Tx and not enough Rx work done,
2446 * exit the polling mode
2447 */
2448 if (work_done < budget)
77555ee7 2449 poll_end_flag = true;
805e969f
TO
2450
2451 if (poll_end_flag) {
2452 napi_complete(napi);
2453 if (adapter->rx_stop_flag) {
2454 adapter->rx_stop_flag = false;
2455 pch_gbe_start_receive(&adapter->hw);
2456 }
2457 pch_gbe_irq_enable(adapter);
2458 } else
124d770a
TO
2459 if (adapter->rx_stop_flag) {
2460 adapter->rx_stop_flag = false;
2461 pch_gbe_start_receive(&adapter->hw);
2462 int_en = ioread32(&adapter->hw.reg->INT_EN);
2463 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
805e969f 2464 &adapter->hw.reg->INT_EN);
124d770a 2465 }
77555ee7
MO
2466
2467 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2468 poll_end_flag, work_done, budget);
2469
2470 return work_done;
2471}
2472
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int irq = adapter->pdev->irq;

	/* run the interrupt handler with the line masked */
	disable_irq(irq);
	pch_gbe_intr(irq, netdev);
	enable_irq(irq);
}
#endif
2487
2488static const struct net_device_ops pch_gbe_netdev_ops = {
2489 .ndo_open = pch_gbe_open,
2490 .ndo_stop = pch_gbe_stop,
2491 .ndo_start_xmit = pch_gbe_xmit_frame,
2492 .ndo_get_stats = pch_gbe_get_stats,
2493 .ndo_set_mac_address = pch_gbe_set_mac,
2494 .ndo_tx_timeout = pch_gbe_tx_timeout,
2495 .ndo_change_mtu = pch_gbe_change_mtu,
756a6b03 2496 .ndo_set_features = pch_gbe_set_features,
77555ee7 2497 .ndo_do_ioctl = pch_gbe_ioctl,
afc4b13d 2498 .ndo_set_rx_mode = pch_gbe_set_multi,
77555ee7
MO
2499#ifdef CONFIG_NET_POLL_CONTROLLER
2500 .ndo_poll_controller = pch_gbe_netpoll,
2501#endif
2502};
2503
2504static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2505 pci_channel_state_t state)
2506{
2507 struct net_device *netdev = pci_get_drvdata(pdev);
2508 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2509
2510 netif_device_detach(netdev);
2511 if (netif_running(netdev))
2512 pch_gbe_down(adapter);
2513 pci_disable_device(pdev);
2514 /* Request a slot slot reset. */
2515 return PCI_ERS_RESULT_NEED_RESET;
2516}
2517
2518static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2519{
2520 struct net_device *netdev = pci_get_drvdata(pdev);
2521 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2522 struct pch_gbe_hw *hw = &adapter->hw;
2523
2524 if (pci_enable_device(pdev)) {
2525 pr_err("Cannot re-enable PCI device after reset\n");
2526 return PCI_ERS_RESULT_DISCONNECT;
2527 }
2528 pci_set_master(pdev);
2529 pci_enable_wake(pdev, PCI_D0, 0);
2530 pch_gbe_hal_power_up_phy(hw);
2531 pch_gbe_reset(adapter);
2532 /* Clear wake up status */
2533 pch_gbe_mac_set_wol_event(hw, 0);
2534
2535 return PCI_ERS_RESULT_RECOVERED;
2536}
2537
/* PCI error handler: bring the interface back up after recovery */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2551
2552static int __pch_gbe_suspend(struct pci_dev *pdev)
2553{
2554 struct net_device *netdev = pci_get_drvdata(pdev);
2555 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2556 struct pch_gbe_hw *hw = &adapter->hw;
2557 u32 wufc = adapter->wake_up_evt;
2558 int retval = 0;
2559
2560 netif_device_detach(netdev);
2561 if (netif_running(netdev))
2562 pch_gbe_down(adapter);
2563 if (wufc) {
2564 pch_gbe_set_multi(netdev);
2565 pch_gbe_setup_rctl(adapter);
2566 pch_gbe_configure_rx(adapter);
2567 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2568 hw->mac.link_duplex);
2569 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2570 hw->mac.link_duplex);
2571 pch_gbe_mac_set_wol_event(hw, wufc);
2572 pci_disable_device(pdev);
2573 } else {
2574 pch_gbe_hal_power_down_phy(hw);
2575 pch_gbe_mac_set_wol_event(hw, wufc);
2576 pci_disable_device(pdev);
2577 }
2578 return retval;
2579}
2580
2581#ifdef CONFIG_PM
/* dev_pm_ops .suspend hook: delegate to the common suspend path */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2588
2589static int pch_gbe_resume(struct device *device)
2590{
2591 struct pci_dev *pdev = to_pci_dev(device);
2592 struct net_device *netdev = pci_get_drvdata(pdev);
2593 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2594 struct pch_gbe_hw *hw = &adapter->hw;
2595 u32 err;
2596
2597 err = pci_enable_device(pdev);
2598 if (err) {
2599 pr_err("Cannot enable PCI device from suspend\n");
2600 return err;
2601 }
2602 pci_set_master(pdev);
2603 pch_gbe_hal_power_up_phy(hw);
2604 pch_gbe_reset(adapter);
2605 /* Clear wake on lan control and status */
2606 pch_gbe_mac_set_wol_event(hw, 0);
2607
2608 if (netif_running(netdev))
2609 pch_gbe_up(adapter);
2610 netif_device_attach(netdev);
2611
2612 return 0;
2613}
2614#endif /* CONFIG_PM */
2615
2616static void pch_gbe_shutdown(struct pci_dev *pdev)
2617{
2618 __pch_gbe_suspend(pdev);
2619 if (system_state == SYSTEM_POWER_OFF) {
2620 pci_wake_from_d3(pdev, true);
2621 pci_set_power_state(pdev, PCI_D3hot);
2622 }
2623}
2624
2625static void pch_gbe_remove(struct pci_dev *pdev)
2626{
2627 struct net_device *netdev = pci_get_drvdata(pdev);
2628 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2629
2321f3b4 2630 cancel_work_sync(&adapter->reset_task);
77555ee7
MO
2631 unregister_netdev(netdev);
2632
2633 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2634
2635 kfree(adapter->tx_ring);
2636 kfree(adapter->rx_ring);
2637
2638 iounmap(adapter->hw.reg);
2639 pci_release_regions(pdev);
2640 free_netdev(netdev);
2641 pci_disable_device(pdev);
2642}
2643
2644static int pch_gbe_probe(struct pci_dev *pdev,
2645 const struct pci_device_id *pci_id)
2646{
2647 struct net_device *netdev;
2648 struct pch_gbe_adapter *adapter;
2649 int ret;
2650
2651 ret = pci_enable_device(pdev);
2652 if (ret)
2653 return ret;
2654
2655 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2656 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2657 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2658 if (ret) {
2659 ret = pci_set_consistent_dma_mask(pdev,
2660 DMA_BIT_MASK(32));
2661 if (ret) {
2662 dev_err(&pdev->dev, "ERR: No usable DMA "
2663 "configuration, aborting\n");
2664 goto err_disable_device;
2665 }
2666 }
2667 }
2668
2669 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2670 if (ret) {
2671 dev_err(&pdev->dev,
2672 "ERR: Can't reserve PCI I/O and memory resources\n");
2673 goto err_disable_device;
2674 }
2675 pci_set_master(pdev);
2676
2677 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2678 if (!netdev) {
2679 ret = -ENOMEM;
77555ee7
MO
2680 goto err_release_pci;
2681 }
2682 SET_NETDEV_DEV(netdev, &pdev->dev);
2683
2684 pci_set_drvdata(pdev, netdev);
2685 adapter = netdev_priv(netdev);
2686 adapter->netdev = netdev;
2687 adapter->pdev = pdev;
2688 adapter->hw.back = adapter;
2689 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2690 if (!adapter->hw.reg) {
2691 ret = -EIO;
2692 dev_err(&pdev->dev, "Can't ioremap\n");
2693 goto err_free_netdev;
2694 }
2695
1a0bdadb
TS
2696#ifdef CONFIG_PCH_PTP
2697 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2698 PCI_DEVFN(12, 4));
2699 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2700 pr_err("Bad ptp filter\n");
2701 return -EINVAL;
2702 }
2703#endif
2704
77555ee7
MO
2705 netdev->netdev_ops = &pch_gbe_netdev_ops;
2706 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2707 netif_napi_add(netdev, &adapter->napi,
2708 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
756a6b03
MM
2709 netdev->hw_features = NETIF_F_RXCSUM |
2710 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2711 netdev->features = netdev->hw_features;
77555ee7
MO
2712 pch_gbe_set_ethtool_ops(netdev);
2713
98200ec2 2714 pch_gbe_mac_load_mac_addr(&adapter->hw);
77555ee7
MO
2715 pch_gbe_mac_reset_hw(&adapter->hw);
2716
2717 /* setup the private structure */
2718 ret = pch_gbe_sw_init(adapter);
2719 if (ret)
2720 goto err_iounmap;
2721
2722 /* Initialize PHY */
2723 ret = pch_gbe_init_phy(adapter);
2724 if (ret) {
2725 dev_err(&pdev->dev, "PHY initialize error\n");
2726 goto err_free_adapter;
2727 }
2728 pch_gbe_hal_get_bus_info(&adapter->hw);
2729
2730 /* Read the MAC address. and store to the private data */
2731 ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2732 if (ret) {
2733 dev_err(&pdev->dev, "MAC address Read Error\n");
2734 goto err_free_adapter;
2735 }
2736
2737 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2738 if (!is_valid_ether_addr(netdev->dev_addr)) {
2b53d078
DH
2739 /*
2740 * If the MAC is invalid (or just missing), display a warning
2741 * but do not abort setting up the device. pch_gbe_up will
2742 * prevent the interface from being brought up until a valid MAC
2743 * is set.
2744 */
2745 dev_err(&pdev->dev, "Invalid MAC address, "
2746 "interface disabled.\n");
77555ee7
MO
2747 }
2748 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2749 (unsigned long)adapter);
2750
2751 INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2752
2753 pch_gbe_check_options(adapter);
2754
77555ee7
MO
2755 /* initialize the wol settings based on the eeprom settings */
2756 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2757 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2758
2759 /* reset the hardware with the new settings */
2760 pch_gbe_reset(adapter);
2761
2762 ret = register_netdev(netdev);
2763 if (ret)
2764 goto err_free_adapter;
2765 /* tell the stack to leave us alone until pch_gbe_open() is called */
2766 netif_carrier_off(netdev);
2767 netif_stop_queue(netdev);
2768
1a0bdadb 2769 dev_dbg(&pdev->dev, "PCH Network Connection\n");
77555ee7
MO
2770
2771 device_set_wakeup_enable(&pdev->dev, 1);
2772 return 0;
2773
2774err_free_adapter:
2775 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2776 kfree(adapter->tx_ring);
2777 kfree(adapter->rx_ring);
2778err_iounmap:
2779 iounmap(adapter->hw.reg);
2780err_free_netdev:
2781 free_netdev(netdev);
2782err_release_pci:
2783 pci_release_regions(pdev);
2784err_disable_device:
2785 pci_disable_device(pdev);
2786 return ret;
2787}
2788
7fc44633 2789static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
77555ee7
MO
2790 {.vendor = PCI_VENDOR_ID_INTEL,
2791 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2792 .subvendor = PCI_ANY_ID,
2793 .subdevice = PCI_ANY_ID,
2794 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2795 .class_mask = (0xFFFF00)
2796 },
b0e6baf5
T
2797 {.vendor = PCI_VENDOR_ID_ROHM,
2798 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2799 .subvendor = PCI_ANY_ID,
2800 .subdevice = PCI_ANY_ID,
2801 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2802 .class_mask = (0xFFFF00)
2803 },
7756332f
TO
2804 {.vendor = PCI_VENDOR_ID_ROHM,
2805 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2806 .subvendor = PCI_ANY_ID,
2807 .subdevice = PCI_ANY_ID,
2808 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2809 .class_mask = (0xFFFF00)
2810 },
77555ee7
MO
2811 /* required last entry */
2812 {0}
2813};
2814
2815#ifdef CONFIG_PM
2816static const struct dev_pm_ops pch_gbe_pm_ops = {
2817 .suspend = pch_gbe_suspend,
2818 .resume = pch_gbe_resume,
2819 .freeze = pch_gbe_suspend,
2820 .thaw = pch_gbe_resume,
2821 .poweroff = pch_gbe_suspend,
2822 .restore = pch_gbe_resume,
2823};
2824#endif
2825
2826static struct pci_error_handlers pch_gbe_err_handler = {
2827 .error_detected = pch_gbe_io_error_detected,
2828 .slot_reset = pch_gbe_io_slot_reset,
2829 .resume = pch_gbe_io_resume
2830};
2831
f7594d42 2832static struct pci_driver pch_gbe_driver = {
77555ee7
MO
2833 .name = KBUILD_MODNAME,
2834 .id_table = pch_gbe_pcidev_id,
2835 .probe = pch_gbe_probe,
2836 .remove = pch_gbe_remove,
aa338601 2837#ifdef CONFIG_PM
77555ee7
MO
2838 .driver.pm = &pch_gbe_pm_ops,
2839#endif
2840 .shutdown = pch_gbe_shutdown,
2841 .err_handler = &pch_gbe_err_handler
2842};
2843
2844
2845static int __init pch_gbe_init_module(void)
2846{
2847 int ret;
2848
f7594d42 2849 ret = pci_register_driver(&pch_gbe_driver);
77555ee7
MO
2850 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2851 if (copybreak == 0) {
2852 pr_info("copybreak disabled\n");
2853 } else {
2854 pr_info("copybreak enabled for packets <= %u bytes\n",
2855 copybreak);
2856 }
2857 }
2858 return ret;
2859}
2860
2861static void __exit pch_gbe_exit_module(void)
2862{
f7594d42 2863 pci_unregister_driver(&pch_gbe_driver);
77555ee7
MO
2864}
2865
2866module_init(pch_gbe_init_module);
2867module_exit(pch_gbe_exit_module);
2868
a1dcfcb7 2869MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
1a0bdadb 2870MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
77555ee7
MO
2871MODULE_LICENSE("GPL");
2872MODULE_VERSION(DRV_VERSION);
2873MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2874
2875module_param(copybreak, uint, 0644);
2876MODULE_PARM_DESC(copybreak,
2877 "Maximum size of packet that is copied to a new buffer on receive");
2878
2879/* pch_gbe_main.c */
This page took 0.298349 seconds and 5 git commands to generate.