pch_gbe: simplify transmit time stamping flag test
[deliverable/linux.git] / drivers / net / ethernet / oki-semi / pch_gbe / pch_gbe_main.c
CommitLineData
77555ee7
MO
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
1a0bdadb 3 * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
77555ee7
MO
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_api.h"
9d9779e7 23#include <linux/module.h>
1a0bdadb
TS
24#ifdef CONFIG_PCH_PTP
25#include <linux/net_tstamp.h>
26#include <linux/ptp_classify.h>
27#endif
77555ee7
MO
28
29#define DRV_VERSION "1.00"
30const char pch_driver_version[] = DRV_VERSION;
31
32#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
33#define PCH_GBE_MAR_ENTRIES 16
34#define PCH_GBE_SHORT_PKT 64
35#define DSC_INIT16 0xC000
36#define PCH_GBE_DMA_ALIGN 0
ac096642 37#define PCH_GBE_DMA_PADDING 2
77555ee7
MO
38#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
39#define PCH_GBE_COPYBREAK_DEFAULT 256
40#define PCH_GBE_PCI_BAR 1
124d770a 41#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
77555ee7 42
b0e6baf5
T
43/* Macros for ML7223 */
44#define PCI_VENDOR_ID_ROHM 0x10db
45#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
46
7756332f
TO
47/* Macros for ML7831 */
48#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
49
77555ee7
MO
50#define PCH_GBE_TX_WEIGHT 64
51#define PCH_GBE_RX_WEIGHT 64
52#define PCH_GBE_RX_BUFFER_WRITE 16
53
54/* Initialize the wake-on-LAN settings */
55#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
56
57#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
58 PCH_GBE_CHIP_TYPE_INTERNAL | \
ce3dad0f 59 PCH_GBE_RGMII_MODE_RGMII \
77555ee7
MO
60 )
61
62/* Ethertype field values */
124d770a 63#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
77555ee7
MO
64#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
65#define PCH_GBE_FRAME_SIZE_2048 2048
66#define PCH_GBE_FRAME_SIZE_4096 4096
67#define PCH_GBE_FRAME_SIZE_8192 8192
68
69#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
70#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
71#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
72#define PCH_GBE_DESC_UNUSED(R) \
73 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
74 (R)->next_to_clean - (R)->next_to_use - 1)
75
76/* Pause packet value */
77#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
78#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
81
82#define PCH_GBE_ETH_ALEN 6
83
/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 * o RX_DMA_CMPLT = Receive DMA completion
 * o RX_DSC_EMP   = Receive descriptor ring empty
 * o RX_FIFO_ERR  = Receive FIFO error
 * o WOL_DET      = Wake-on-LAN event detected
 * o TX_CMPLT     = Transmit completion
 */
92#define PCH_GBE_INT_ENABLE_MASK ( \
93 PCH_GBE_INT_RX_DMA_CMPLT | \
94 PCH_GBE_INT_RX_DSC_EMP | \
124d770a 95 PCH_GBE_INT_RX_FIFO_ERR | \
77555ee7
MO
96 PCH_GBE_INT_WOL_DET | \
97 PCH_GBE_INT_TX_CMPLT \
98 )
99
124d770a 100#define PCH_GBE_INT_DISABLE_ALL 0
77555ee7 101
1a0bdadb
TS
102#ifdef CONFIG_PCH_PTP
103/* Macros for ieee1588 */
1a0bdadb
TS
104/* 0x40 Time Synchronization Channel Control Register Bits */
105#define MASTER_MODE (1<<0)
106#define SLAVE_MODE (0<<0)
107#define V2_MODE (1<<31)
108#define CAP_MODE0 (0<<16)
109#define CAP_MODE2 (1<<17)
110
111/* 0x44 Time Synchronization Channel Event Register Bits */
112#define TX_SNAPSHOT_LOCKED (1<<0)
113#define RX_SNAPSHOT_LOCKED (1<<1)
114#endif
115
77555ee7
MO
116static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
117
191cc687 118static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
119static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
120 int data);
98200ec2 121
1a0bdadb
TS
122#ifdef CONFIG_PCH_PTP
123static struct sock_filter ptp_filter[] = {
124 PTP_FILTER
125};
126
127static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
128{
129 u8 *data = skb->data;
130 unsigned int offset;
131 u16 *hi, *id;
132 u32 lo;
133
134 if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
135 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
136 return 0;
137 }
138
139 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
140
141 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
142 return 0;
143
144 hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
145 id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
146
147 memcpy(&lo, &hi[1], sizeof(lo));
148
149 return (uid_hi == *hi &&
150 uid_lo == lo &&
151 seqid == *id);
152}
153
/**
 * pch_rx_timestamp - attach a hardware RX timestamp to a received skb
 * @adapter: Board private structure
 * @skb:     Received packet
 *
 * Reads the IEEE 1588 RX snapshot from the companion PTP device and, if the
 * latched UUID/sequence id match this packet, stores the timestamp in the
 * skb's shared hwtstamps.  The RX snapshot is always released afterwards so
 * the hardware can capture the next event packet.
 */
static void pch_rx_timestamp(
	struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct pci_dev *pdev;
	u64 ns;
	u32 hi, lo, val;
	u16 uid, seq;

	if (!adapter->hwts_rx_en)
		return;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	val = pch_ch_event_read(pdev);

	/* No RX snapshot captured yet - nothing to report. */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = pch_src_uuid_lo_read(pdev);
	hi = pch_src_uuid_hi_read(pdev);

	/* hi register packs the UUID's first 16 bits and the sequence id. */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	ns = pch_rx_snap_read(pdev);

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* Release the snapshot lock so the next event can be captured. */
	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
}
191
/**
 * pch_tx_timestamp - poll for and report the hardware TX timestamp
 * @adapter: Board private structure
 * @skb:     Transmitted packet
 *
 * If hardware TX time stamping was requested for this skb and is enabled,
 * busy-poll the companion IEEE 1588 device (up to ~100us) for the TX
 * snapshot and hand the timestamp to the stack via skb_tstamp_tx().
 */
static void pch_tx_timestamp(
	struct pch_gbe_adapter *adapter, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct pci_dev *pdev;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 cnt, val;

	shtx = skb_shinfo(skb);
	/* Fast path: no stamp requested for this skb, or TX stamping off. */
	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
		return;

	shtx->tx_flags |= SKBTX_IN_PROGRESS;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = pch_ch_event_read(pdev);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* Timed out: withdraw the in-progress marker and give up. */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	ns = pch_tx_snap_read(pdev);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* Release the snapshot lock for the next TX event. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
233
/**
 * hwtstamp_ioctl - handle the SIOCSHWTSTAMP time stamping configuration
 * @netdev: Network interface device structure
 * @ifr:    User request carrying a struct hwtstamp_config
 * @cmd:    ioctl command (unused)
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL if reserved
 * flags are set, -ERANGE for unsupported tx_type/rx_filter values.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Get ieee1588's dev information */
	pdev = adapter->ptp_pdev;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		adapter->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		adapter->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		adapter->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* NOTE(review): hwts_rx_en stays 0 here even though a capture
		 * mode is programmed - presumably intentional for slave mode;
		 * confirm against hardware spec. */
		adapter->hwts_rx_en = 0;
		pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		adapter->hwts_rx_en = 1;
		pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
285#endif
286
/**
 * pch_gbe_mac_load_mac_addr - Trigger a hardware reload of the MAC address
 * @hw: Pointer to the HW structure
 */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
291
77555ee7
MO
292/**
293 * pch_gbe_mac_read_mac_addr - Read MAC address
294 * @hw: Pointer to the HW structure
295 * Returns
296 * 0: Successful.
297 */
298s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
299{
300 u32 adr1a, adr1b;
301
302 adr1a = ioread32(&hw->reg->mac_adr[0].high);
303 adr1b = ioread32(&hw->reg->mac_adr[0].low);
304
305 hw->mac.addr[0] = (u8)(adr1a & 0xFF);
306 hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
307 hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
308 hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
309 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
310 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
311
312 pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
313 return 0;
314}
315
/**
 * pch_gbe_wait_clr_bit - Busy-wait for a register bit to clear
 * @reg: Pointer of register
 * @bit: Busy bit mask to wait on
 *
 * Spins (with cpu_relax) for up to 1000 iterations; logs an error if the
 * bit never clears.
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}
124d770a
TO
331
/**
 * pch_gbe_wait_clr_bit_irq - Wait for a bit to clear (interrupt context)
 * @reg: Pointer of register
 * @bit: Busy bit mask to wait on
 *
 * Polls up to 20 times with 5us delays (bounded, safe in IRQ context).
 * Returns 0 when the bit cleared, -1 on timeout.
 */
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
	u32 tmp;
	int ret = -1;
	/* wait busy */
	tmp = 20;
	while ((ioread32(reg) & bit) && --tmp)
		udelay(5);
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
	else
		ret = 0;
	return ret;
}
351
77555ee7
MO
/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:    Pointer to the HW structure
 * @addr:  Pointer to the MAC address (6 bytes, network byte order)
 * @index: MAC address array register to program
 *
 * The hardware requires the entry to be masked (stopped) while its
 * address registers are rewritten; the mask is restored afterwards.
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address 1A/1B register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
384
/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw: Pointer to the HW structure
 *
 * Issues a full MAC reset and restores the station address afterwards
 * (the reset clears the address registers).
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address. and store to the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}
402
124d770a
TO
/**
 * pch_gbe_mac_reset_rx - Reset only the receive unit
 * @hw: Pointer to the HW structure
 *
 * IRQ-context-safe variant: uses the bounded pch_gbe_wait_clr_bit_irq()
 * poll, then reprograms the station address cleared by the reset.
 */
static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
	/* Read the MAC address. and store to the private data */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
	/* Setup the MAC address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}
413
77555ee7
MO
/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:        Pointer to the HW structure
 * @mar_count: Number of receive address registers
 *
 * Programs the station address into entry 0, zeroes the remaining
 * entries, and masks all entries except entry 0 (mask 0xFFFE).
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
435
436
/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:             Pointer to the HW structure
 * @mc_addr_list:   Array of multicast addresses to program
 * @mc_addr_count:  Number of multicast addresses to program
 * @mar_used_count: The first MAC Address register free to program
 * @mar_total_num:  Total number of supported MAC Address Registers
 *
 * Fills the free exact-match filter entries with the supplied list; any
 * remaining entries are masked off and their addresses cleared.
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					u8 *mc_addr_list, u32 mc_addr_count,
					u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			/* advance to the next 6-byte address */
			mc_addr_list += PCH_GBE_ETH_ALEN;
		} else {
			/* Clear MAC address mask */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
					&hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}
473
474/**
475 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
476 * @hw: Pointer to the HW structure
477 * Returns
478 * 0: Successful.
479 * Negative value: Failed.
480 */
481s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
482{
483 struct pch_gbe_mac_info *mac = &hw->mac;
484 u32 rx_fctrl;
485
486 pr_debug("mac->fc = %u\n", mac->fc);
487
488 rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
489
490 switch (mac->fc) {
491 case PCH_GBE_FC_NONE:
492 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
493 mac->tx_fc_enable = false;
494 break;
495 case PCH_GBE_FC_RX_PAUSE:
496 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
497 mac->tx_fc_enable = false;
498 break;
499 case PCH_GBE_FC_TX_PAUSE:
500 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
501 mac->tx_fc_enable = true;
502 break;
503 case PCH_GBE_FC_FULL:
504 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
505 mac->tx_fc_enable = true;
506 break;
507 default:
508 pr_err("Flow control param set incorrectly\n");
509 return -EINVAL;
510 }
511 if (mac->link_duplex == DUPLEX_HALF)
512 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
513 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
514 pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
515 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
516 return 0;
517}
518
/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event mask; 0 disables WoL entirely
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		/* Clear stale WoL status before enabling. */
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		/* Disable WoL and clear any pending status. */
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}
547
/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation (PCH_GBE_MIIM_OPER_READ or _WRITE)
 * @reg:  Access register of PHY
 * @data: Write data
 *
 * Returns: the data read (for a read), or 0 on timeout - a timeout is
 * indistinguishable from a legitimate zero read.
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	/* Wait (up to 100 x 20us) for the MIIM interface to go idle. */
	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	/* Kick off the operation, then poll for completion. */
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
593
/**
 * pch_gbe_mac_set_pause_packet - Build and transmit a pause packet
 * @hw: Pointer to the HW structure
 *
 * Fills the five PAUSE_PKT registers (fixed header/opcode values plus the
 * station MAC address split across PKT2/PKT3) and requests transmission.
 */
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
{
	unsigned long tmp2, tmp3;

	/* Set Pause packet */
	/* PKT2: fixed value in the low half, MAC bytes 0-1 in the high. */
	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);

	/* PKT3: MAC bytes 2-5, little-endian packed. */
	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);

	/* Transmit Pause Packet */
	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
		 ioread32(&hw->reg->PAUSE_PKT5));

	return;
}
628
629
630/**
631 * pch_gbe_alloc_queues - Allocate memory for all rings
632 * @adapter: Board private structure to initialize
633 * Returns
634 * 0: Successfully
635 * Negative value: Failed
636 */
637static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
638{
639 int size;
640
641 size = (int)sizeof(struct pch_gbe_tx_ring);
642 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
643 if (!adapter->tx_ring)
644 return -ENOMEM;
645 size = (int)sizeof(struct pch_gbe_rx_ring);
646 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
647 if (!adapter->rx_ring) {
648 kfree(adapter->tx_ring);
649 return -ENOMEM;
650 }
651 return 0;
652}
653
654/**
655 * pch_gbe_init_stats - Initialize status
656 * @adapter: Board private structure to initialize
657 */
658static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
659{
660 memset(&adapter->stats, 0, sizeof(adapter->stats));
661 return;
662}
663
/**
 * pch_gbe_init_phy - Initialize PHY
 * @adapter: Board private structure to initialize
 *
 * Probes PHY addresses (order {1,0,2,...,31}), selects the first address
 * that answers sanely, isolates all other PHYs, and fills in the mii_if
 * callbacks.
 *
 * Returns:
 * 0 on success, -EAGAIN if no PHY was found.
 */
static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 addr;
	u16 bmcr, stat;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
		/* BMSR is read twice: latched link bits need a second read. */
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	adapter->hw.phy.addr = adapter->mii.phy_id;
	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
	/* assumes PCH_GBE_PHY_REGS_LEN == 32 - TODO confirm */
	if (addr == 32)
		return -EAGAIN;
	/* Selected the phy and isolate the rest */
	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   BMCR_ISOLATE);
		} else {
			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
					   bmcr & ~BMCR_ISOLATE);
		}
	}

	/* MII setup */
	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;
	adapter->mii.dev = adapter->netdev;
	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
	return 0;
}
711
712/**
713 * pch_gbe_mdio_read - The read function for mii
714 * @netdev: Network interface device structure
715 * @addr: Phy ID
716 * @reg: Access location
717 * Returns
718 * 0: Successfully
719 * Negative value: Failed
720 */
191cc687 721static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
77555ee7
MO
722{
723 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
724 struct pch_gbe_hw *hw = &adapter->hw;
725
726 return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
727 (u16) 0);
728}
729
730/**
731 * pch_gbe_mdio_write - The write function for mii
732 * @netdev: Network interface device structure
733 * @addr: Phy ID (not used)
734 * @reg: Access location
735 * @data: Write data
736 */
191cc687 737static void pch_gbe_mdio_write(struct net_device *netdev,
738 int addr, int reg, int data)
77555ee7
MO
739{
740 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
741 struct pch_gbe_hw *hw = &adapter->hw;
742
743 pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
744}
745
/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work: Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	/* rtnl serializes against other network configuration paths. */
	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}
759
/**
 * pch_gbe_reinit_locked - Re-initialization
 * @adapter: Board private structure
 *
 * Caller must hold the rtnl lock (see pch_gbe_reset_task).
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}
769
/**
 * pch_gbe_reset - Reset GbE
 * @adapter: Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}
782
/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter: Board private structure
 *
 * Releases the IRQ and, if MSI was enabled at request time, disables it.
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}
797
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 *
 * Uses irq_sem so that nested disable/enable pairs balance; waits for
 * any in-flight handler to finish before returning.
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	/* Read INT_ST to flush the posted write / clear pending status. */
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
813
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 *
 * Only re-enables when the last outstanding disable is released
 * (irq_sem reaches zero).
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
827
828
829
/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	/* Enable TX TCP/IP checksum offload acceleration. */
	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
852
/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: Board private structure
 *
 * Programs the TX descriptor ring base/size and enables TX DMA.
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	/* Hardware expects the size minus one descriptor (0x10 bytes). */
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}
878
/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	/* RX checksum offload is kept disabled here. */
	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
900
/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: Board private structure
 *
 * Disables the receive MAC and RX DMA before programming the descriptor
 * ring base/size; the order is required so the ring is not touched by
 * hardware while being reprogrammed.
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}
937
938/**
939 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
940 * @adapter: Board private structure
941 * @buffer_info: Buffer information structure
942 */
943static void pch_gbe_unmap_and_free_tx_resource(
944 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
945{
946 if (buffer_info->mapped) {
947 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
948 buffer_info->length, DMA_TO_DEVICE);
949 buffer_info->mapped = false;
950 }
951 if (buffer_info->skb) {
952 dev_kfree_skb_any(buffer_info->skb);
953 buffer_info->skb = NULL;
954 }
955}
956
957/**
958 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
959 * @adapter: Board private structure
960 * @buffer_info: Buffer information structure
961 */
962static void pch_gbe_unmap_and_free_rx_resource(
963 struct pch_gbe_adapter *adapter,
964 struct pch_gbe_buffer *buffer_info)
965{
966 if (buffer_info->mapped) {
967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
968 buffer_info->length, DMA_FROM_DEVICE);
969 buffer_info->mapped = false;
970 }
971 if (buffer_info->skb) {
972 dev_kfree_skb_any(buffer_info->skb);
973 buffer_info->skb = NULL;
974 }
975}
976
/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter: Board private structure
 * @tx_ring: Ring to be cleaned
 *
 * Frees every buffer/skb in the ring, zeroes the descriptor memory and
 * bookkeeping indices, and reprograms the hardware ring pointers.
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				   struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}
1007
/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter: Board private structure
 * @rx_ring: Ring to free buffers from
 *
 * Frees every buffer/skb in the ring, zeroes the descriptor memory and
 * bookkeeping indices, and reprograms the hardware ring pointers.
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}
1038
1039static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
1040 u16 duplex)
1041{
1042 struct pch_gbe_hw *hw = &adapter->hw;
1043 unsigned long rgmii = 0;
1044
1045 /* Set the RGMII control. */
1046#ifdef PCH_GBE_MAC_IFOP_RGMII
1047 switch (speed) {
1048 case SPEED_10:
1049 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
1050 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1051 break;
1052 case SPEED_100:
1053 rgmii = (PCH_GBE_RGMII_RATE_25M |
1054 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1055 break;
1056 case SPEED_1000:
1057 rgmii = (PCH_GBE_RGMII_RATE_125M |
1058 PCH_GBE_MAC_RGMII_CTRL_SETTING);
1059 break;
1060 }
1061 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1062#else /* GMII */
1063 rgmii = 0;
1064 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1065#endif
1066}
1067static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1068 u16 duplex)
1069{
1070 struct net_device *netdev = adapter->netdev;
1071 struct pch_gbe_hw *hw = &adapter->hw;
1072 unsigned long mode = 0;
1073
1074 /* Set the communication mode */
1075 switch (speed) {
1076 case SPEED_10:
1077 mode = PCH_GBE_MODE_MII_ETHER;
1078 netdev->tx_queue_len = 10;
1079 break;
1080 case SPEED_100:
1081 mode = PCH_GBE_MODE_MII_ETHER;
1082 netdev->tx_queue_len = 100;
1083 break;
1084 case SPEED_1000:
1085 mode = PCH_GBE_MODE_GMII_ETHER;
1086 break;
1087 }
1088 if (duplex == DUPLEX_FULL)
1089 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1090 else
1091 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1092 iowrite32(mode, &hw->reg->MODE);
1093}
1094
1095/**
1096 * pch_gbe_watchdog - Watchdog process
1097 * @data: Board private structure
1098 */
1099static void pch_gbe_watchdog(unsigned long data)
1100{
1101 struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
1102 struct net_device *netdev = adapter->netdev;
1103 struct pch_gbe_hw *hw = &adapter->hw;
77555ee7
MO
1104
1105 pr_debug("right now = %ld\n", jiffies);
1106
1107 pch_gbe_update_stats(adapter);
1108 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
8ae6daca 1109 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
77555ee7
MO
1110 netdev->tx_queue_len = adapter->tx_queue_len;
1111 /* mii library handles link maintenance tasks */
1112 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1113 pr_err("ethtool get setting Error\n");
1114 mod_timer(&adapter->watchdog_timer,
1115 round_jiffies(jiffies +
1116 PCH_GBE_WATCHDOG_PERIOD));
1117 return;
1118 }
8ae6daca 1119 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
77555ee7
MO
1120 hw->mac.link_duplex = cmd.duplex;
1121 /* Set the RGMII control. */
1122 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1123 hw->mac.link_duplex);
1124 /* Set the communication mode */
1125 pch_gbe_set_mode(adapter, hw->mac.link_speed,
1126 hw->mac.link_duplex);
1127 netdev_dbg(netdev,
1128 "Link is Up %d Mbps %s-Duplex\n",
8ae6daca 1129 hw->mac.link_speed,
77555ee7
MO
1130 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1131 netif_carrier_on(netdev);
1132 netif_wake_queue(netdev);
1133 } else if ((!mii_link_ok(&adapter->mii)) &&
1134 (netif_carrier_ok(netdev))) {
1135 netdev_dbg(netdev, "NIC Link is Down\n");
1136 hw->mac.link_speed = SPEED_10;
1137 hw->mac.link_duplex = DUPLEX_HALF;
1138 netif_carrier_off(netdev);
1139 netif_stop_queue(netdev);
1140 }
1141 mod_timer(&adapter->watchdog_timer,
1142 round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1143}
1144
1145/**
1146 * pch_gbe_tx_queue - Carry out queuing of the transmission data
1147 * @adapter: Board private structure
1148 * @tx_ring: Tx descriptor ring structure
1149 * @skb: Sockt buffer structure
1150 */
1151static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1152 struct pch_gbe_tx_ring *tx_ring,
1153 struct sk_buff *skb)
1154{
1155 struct pch_gbe_hw *hw = &adapter->hw;
1156 struct pch_gbe_tx_desc *tx_desc;
1157 struct pch_gbe_buffer *buffer_info;
1158 struct sk_buff *tmp_skb;
1159 unsigned int frame_ctrl;
1160 unsigned int ring_num;
1161 unsigned long flags;
1162
1163 /*-- Set frame control --*/
1164 frame_ctrl = 0;
1165 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1166 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
756a6b03 1167 if (skb->ip_summed == CHECKSUM_NONE)
77555ee7
MO
1168 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1169
1170 /* Performs checksum processing */
1171 /*
1172 * It is because the hardware accelerator does not support a checksum,
1173 * when the received data size is less than 64 bytes.
1174 */
756a6b03 1175 if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
77555ee7
MO
1176 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1177 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1178 if (skb->protocol == htons(ETH_P_IP)) {
1179 struct iphdr *iph = ip_hdr(skb);
1180 unsigned int offset;
1181 iph->check = 0;
1182 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
1183 offset = skb_transport_offset(skb);
1184 if (iph->protocol == IPPROTO_TCP) {
1185 skb->csum = 0;
1186 tcp_hdr(skb)->check = 0;
1187 skb->csum = skb_checksum(skb, offset,
1188 skb->len - offset, 0);
1189 tcp_hdr(skb)->check =
1190 csum_tcpudp_magic(iph->saddr,
1191 iph->daddr,
1192 skb->len - offset,
1193 IPPROTO_TCP,
1194 skb->csum);
1195 } else if (iph->protocol == IPPROTO_UDP) {
1196 skb->csum = 0;
1197 udp_hdr(skb)->check = 0;
1198 skb->csum =
1199 skb_checksum(skb, offset,
1200 skb->len - offset, 0);
1201 udp_hdr(skb)->check =
1202 csum_tcpudp_magic(iph->saddr,
1203 iph->daddr,
1204 skb->len - offset,
1205 IPPROTO_UDP,
1206 skb->csum);
1207 }
1208 }
1209 }
1210 spin_lock_irqsave(&tx_ring->tx_lock, flags);
1211 ring_num = tx_ring->next_to_use;
1212 if (unlikely((ring_num + 1) == tx_ring->count))
1213 tx_ring->next_to_use = 0;
1214 else
1215 tx_ring->next_to_use = ring_num + 1;
1216
1217 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1218 buffer_info = &tx_ring->buffer_info[ring_num];
1219 tmp_skb = buffer_info->skb;
1220
1221 /* [Header:14][payload] ---> [Header:14][paddong:2][payload] */
1222 memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1223 tmp_skb->data[ETH_HLEN] = 0x00;
1224 tmp_skb->data[ETH_HLEN + 1] = 0x00;
1225 tmp_skb->len = skb->len;
1226 memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1227 (skb->len - ETH_HLEN));
25985edc 1228 /*-- Set Buffer information --*/
77555ee7
MO
1229 buffer_info->length = tmp_skb->len;
1230 buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1231 buffer_info->length,
1232 DMA_TO_DEVICE);
1233 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1234 pr_err("TX DMA map failed\n");
1235 buffer_info->dma = 0;
1236 buffer_info->time_stamp = 0;
1237 tx_ring->next_to_use = ring_num;
1238 return;
1239 }
1240 buffer_info->mapped = true;
1241 buffer_info->time_stamp = jiffies;
1242
1243 /*-- Set Tx descriptor --*/
1244 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1245 tx_desc->buffer_addr = (buffer_info->dma);
1246 tx_desc->length = (tmp_skb->len);
1247 tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1248 tx_desc->tx_frame_ctrl = (frame_ctrl);
1249 tx_desc->gbec_status = (DSC_INIT16);
1250
1251 if (unlikely(++ring_num == tx_ring->count))
1252 ring_num = 0;
1253
1254 /* Update software pointer of TX descriptor */
1255 iowrite32(tx_ring->dma +
1256 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1257 &hw->reg->TX_DSC_SW_P);
1a0bdadb
TS
1258
1259#ifdef CONFIG_PCH_PTP
1260 pch_tx_timestamp(adapter, skb);
1261#endif
1262
77555ee7
MO
1263 dev_kfree_skb_any(skb);
1264}
1265
1266/**
1267 * pch_gbe_update_stats - Update the board statistics counters
1268 * @adapter: Board private structure
1269 */
1270void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1271{
1272 struct net_device *netdev = adapter->netdev;
1273 struct pci_dev *pdev = adapter->pdev;
1274 struct pch_gbe_hw_stats *stats = &adapter->stats;
1275 unsigned long flags;
1276
1277 /*
1278 * Prevent stats update while adapter is being reset, or if the pci
1279 * connection is down.
1280 */
1281 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1282 return;
1283
1284 spin_lock_irqsave(&adapter->stats_lock, flags);
1285
1286 /* Update device status "adapter->stats" */
1287 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1288 stats->tx_errors = stats->tx_length_errors +
1289 stats->tx_aborted_errors +
1290 stats->tx_carrier_errors + stats->tx_timeout_count;
1291
1292 /* Update network device status "adapter->net_stats" */
1293 netdev->stats.rx_packets = stats->rx_packets;
1294 netdev->stats.rx_bytes = stats->rx_bytes;
1295 netdev->stats.rx_dropped = stats->rx_dropped;
1296 netdev->stats.tx_packets = stats->tx_packets;
1297 netdev->stats.tx_bytes = stats->tx_bytes;
1298 netdev->stats.tx_dropped = stats->tx_dropped;
1299 /* Fill out the OS statistics structure */
1300 netdev->stats.multicast = stats->multicast;
1301 netdev->stats.collisions = stats->collisions;
1302 /* Rx Errors */
1303 netdev->stats.rx_errors = stats->rx_errors;
1304 netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1305 netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1306 /* Tx Errors */
1307 netdev->stats.tx_errors = stats->tx_errors;
1308 netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1309 netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1310
1311 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1312}
1313
124d770a
TO
/* Halt the receive path: stop Rx DMA, wait for the DMA bus to go idle,
 * then reset the MAC receiver. If the bus never goes idle, bus mastering
 * is temporarily disabled around the reset so the stuck DMA cannot
 * corrupt memory while the receiver is being reset. */
static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rxdma;
	u16 value;
	int ret;

	/* Disable Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);
	/* Wait Rx DMA BUS is IDLE */
	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
	if (ret) {
		/* DMA did not go idle in time: fence it off first */
		/* Disable Bus master */
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
		value &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
		/* Enable Bus master */
		value |= PCI_COMMAND_MASTER;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
	} else {
		/* Stop Receive */
		pch_gbe_mac_reset_rx(hw);
	}
}
1342
5229d87e
TO
1343static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1344{
1345 u32 rxdma;
1346
1347 /* Enables Receive DMA */
1348 rxdma = ioread32(&hw->reg->DMA_CTRL);
1349 rxdma |= PCH_GBE_RX_DMA_EN;
1350 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1351 /* Enables Receive */
1352 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1353 return;
1354}
1355
77555ee7
MO
1356/**
1357 * pch_gbe_intr - Interrupt Handler
1358 * @irq: Interrupt number
1359 * @data: Pointer to a network interface device structure
1360 * Returns
1361 * - IRQ_HANDLED: Our interrupt
1362 * - IRQ_NONE: Not our interrupt
1363 */
1364static irqreturn_t pch_gbe_intr(int irq, void *data)
1365{
1366 struct net_device *netdev = data;
1367 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1368 struct pch_gbe_hw *hw = &adapter->hw;
1369 u32 int_st;
1370 u32 int_en;
1371
1372 /* Check request status */
1373 int_st = ioread32(&hw->reg->INT_ST);
1374 int_st = int_st & ioread32(&hw->reg->INT_EN);
1375 /* When request status is no interruption factor */
1376 if (unlikely(!int_st))
1377 return IRQ_NONE; /* Not our interrupt. End processing. */
1378 pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1379 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1380 adapter->stats.intr_rx_frame_err_count++;
1381 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
124d770a
TO
1382 if (!adapter->rx_stop_flag) {
1383 adapter->stats.intr_rx_fifo_err_count++;
1384 pr_debug("Rx fifo over run\n");
1385 adapter->rx_stop_flag = true;
1386 int_en = ioread32(&hw->reg->INT_EN);
1387 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1388 &hw->reg->INT_EN);
1389 pch_gbe_stop_receive(adapter);
805e969f
TO
1390 int_st |= ioread32(&hw->reg->INT_ST);
1391 int_st = int_st & ioread32(&hw->reg->INT_EN);
124d770a 1392 }
77555ee7
MO
1393 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1394 adapter->stats.intr_rx_dma_err_count++;
1395 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1396 adapter->stats.intr_tx_fifo_err_count++;
1397 if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1398 adapter->stats.intr_tx_dma_err_count++;
1399 if (int_st & PCH_GBE_INT_TCPIP_ERR)
1400 adapter->stats.intr_tcpip_err_count++;
1401 /* When Rx descriptor is empty */
1402 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1403 adapter->stats.intr_rx_dsc_empty_count++;
124d770a 1404 pr_debug("Rx descriptor is empty\n");
77555ee7
MO
1405 int_en = ioread32(&hw->reg->INT_EN);
1406 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1407 if (hw->mac.tx_fc_enable) {
1408 /* Set Pause packet */
1409 pch_gbe_mac_set_pause_packet(hw);
1410 }
77555ee7
MO
1411 }
1412
1413 /* When request status is Receive interruption */
805e969f 1414 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
23677ce3 1415 (adapter->rx_stop_flag)) {
77555ee7
MO
1416 if (likely(napi_schedule_prep(&adapter->napi))) {
1417 /* Enable only Rx Descriptor empty */
1418 atomic_inc(&adapter->irq_sem);
1419 int_en = ioread32(&hw->reg->INT_EN);
1420 int_en &=
1421 ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1422 iowrite32(int_en, &hw->reg->INT_EN);
1423 /* Start polling for NAPI */
1424 __napi_schedule(&adapter->napi);
1425 }
1426 }
1427 pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
1428 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1429 return IRQ_HANDLED;
1430}
1431
1432/**
1433 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1434 * @adapter: Board private structure
1435 * @rx_ring: Rx descriptor ring
1436 * @cleaned_count: Cleaned count
1437 */
1438static void
1439pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1440 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1441{
1442 struct net_device *netdev = adapter->netdev;
1443 struct pci_dev *pdev = adapter->pdev;
1444 struct pch_gbe_hw *hw = &adapter->hw;
1445 struct pch_gbe_rx_desc *rx_desc;
1446 struct pch_gbe_buffer *buffer_info;
1447 struct sk_buff *skb;
1448 unsigned int i;
1449 unsigned int bufsz;
1450
124d770a 1451 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
77555ee7
MO
1452 i = rx_ring->next_to_use;
1453
1454 while ((cleaned_count--)) {
1455 buffer_info = &rx_ring->buffer_info[i];
124d770a
TO
1456 skb = netdev_alloc_skb(netdev, bufsz);
1457 if (unlikely(!skb)) {
1458 /* Better luck next round */
1459 adapter->stats.rx_alloc_buff_failed++;
1460 break;
77555ee7 1461 }
124d770a
TO
1462 /* align */
1463 skb_reserve(skb, NET_IP_ALIGN);
1464 buffer_info->skb = skb;
1465
77555ee7 1466 buffer_info->dma = dma_map_single(&pdev->dev,
124d770a 1467 buffer_info->rx_buffer,
77555ee7
MO
1468 buffer_info->length,
1469 DMA_FROM_DEVICE);
1470 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1471 dev_kfree_skb(skb);
1472 buffer_info->skb = NULL;
1473 buffer_info->dma = 0;
1474 adapter->stats.rx_alloc_buff_failed++;
1475 break; /* while !buffer_info->skb */
1476 }
1477 buffer_info->mapped = true;
1478 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1479 rx_desc->buffer_addr = (buffer_info->dma);
1480 rx_desc->gbec_status = DSC_INIT16;
1481
1482 pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1483 i, (unsigned long long)buffer_info->dma,
1484 buffer_info->length);
1485
1486 if (unlikely(++i == rx_ring->count))
1487 i = 0;
1488 }
1489 if (likely(rx_ring->next_to_use != i)) {
1490 rx_ring->next_to_use = i;
1491 if (unlikely(i-- == 0))
1492 i = (rx_ring->count - 1);
1493 iowrite32(rx_ring->dma +
1494 (int)sizeof(struct pch_gbe_rx_desc) * i,
1495 &hw->reg->RX_DSC_SW_P);
1496 }
1497 return;
1498}
1499
124d770a
TO
1500static int
1501pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1502 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1503{
1504 struct pci_dev *pdev = adapter->pdev;
1505 struct pch_gbe_buffer *buffer_info;
1506 unsigned int i;
1507 unsigned int bufsz;
1508 unsigned int size;
1509
1510 bufsz = adapter->rx_buffer_len;
1511
1512 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1513 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1514 &rx_ring->rx_buff_pool_logic,
1515 GFP_KERNEL);
1516 if (!rx_ring->rx_buff_pool) {
1517 pr_err("Unable to allocate memory for the receive poll buffer\n");
1518 return -ENOMEM;
1519 }
1520 memset(rx_ring->rx_buff_pool, 0, size);
1521 rx_ring->rx_buff_pool_size = size;
1522 for (i = 0; i < rx_ring->count; i++) {
1523 buffer_info = &rx_ring->buffer_info[i];
1524 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1525 buffer_info->length = bufsz;
1526 }
1527 return 0;
1528}
1529
77555ee7
MO
1530/**
1531 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1532 * @adapter: Board private structure
1533 * @tx_ring: Tx descriptor ring
1534 */
1535static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1536 struct pch_gbe_tx_ring *tx_ring)
1537{
1538 struct pch_gbe_buffer *buffer_info;
1539 struct sk_buff *skb;
1540 unsigned int i;
1541 unsigned int bufsz;
1542 struct pch_gbe_tx_desc *tx_desc;
1543
1544 bufsz =
1545 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1546
1547 for (i = 0; i < tx_ring->count; i++) {
1548 buffer_info = &tx_ring->buffer_info[i];
1549 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1550 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1551 buffer_info->skb = skb;
1552 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1553 tx_desc->gbec_status = (DSC_INIT16);
1554 }
1555 return;
1556}
1557
1558/**
1559 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1560 * @adapter: Board private structure
1561 * @tx_ring: Tx descriptor ring
1562 * Returns
1563 * true: Cleaned the descriptor
1564 * false: Not cleaned the descriptor
1565 */
1566static bool
1567pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1568 struct pch_gbe_tx_ring *tx_ring)
1569{
1570 struct pch_gbe_tx_desc *tx_desc;
1571 struct pch_gbe_buffer *buffer_info;
1572 struct sk_buff *skb;
1573 unsigned int i;
1574 unsigned int cleaned_count = 0;
805e969f 1575 bool cleaned = true;
77555ee7
MO
1576
1577 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1578
1579 i = tx_ring->next_to_clean;
1580 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1581 pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
1582 tx_desc->gbec_status, tx_desc->dma_status);
1583
1584 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1585 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
77555ee7
MO
1586 buffer_info = &tx_ring->buffer_info[i];
1587 skb = buffer_info->skb;
1588
1589 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1590 adapter->stats.tx_aborted_errors++;
1591 pr_err("Transfer Abort Error\n");
1592 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1593 ) {
1594 adapter->stats.tx_carrier_errors++;
1595 pr_err("Transfer Carrier Sense Error\n");
1596 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1597 ) {
1598 adapter->stats.tx_aborted_errors++;
1599 pr_err("Transfer Collision Abort Error\n");
1600 } else if ((tx_desc->gbec_status &
1601 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1602 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1603 adapter->stats.collisions++;
1604 adapter->stats.tx_packets++;
1605 adapter->stats.tx_bytes += skb->len;
1606 pr_debug("Transfer Collision\n");
1607 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1608 ) {
1609 adapter->stats.tx_packets++;
1610 adapter->stats.tx_bytes += skb->len;
1611 }
1612 if (buffer_info->mapped) {
1613 pr_debug("unmap buffer_info->dma : %d\n", i);
1614 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1615 buffer_info->length, DMA_TO_DEVICE);
1616 buffer_info->mapped = false;
1617 }
1618 if (buffer_info->skb) {
1619 pr_debug("trim buffer_info->skb : %d\n", i);
1620 skb_trim(buffer_info->skb, 0);
1621 }
1622 tx_desc->gbec_status = DSC_INIT16;
1623 if (unlikely(++i == tx_ring->count))
1624 i = 0;
1625 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1626
1627 /* weight of a sort for tx, to avoid endless transmit cleanup */
805e969f
TO
1628 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1629 cleaned = false;
77555ee7 1630 break;
805e969f 1631 }
77555ee7
MO
1632 }
1633 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1634 cleaned_count);
1635 /* Recover from running out of Tx resources in xmit_frame */
1636 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1637 netif_wake_queue(adapter->netdev);
1638 adapter->stats.tx_restart_count++;
1639 pr_debug("Tx wake queue\n");
1640 }
1641 spin_lock(&adapter->tx_queue_lock);
1642 tx_ring->next_to_clean = i;
1643 spin_unlock(&adapter->tx_queue_lock);
1644 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1645 return cleaned;
1646}
1647
1648/**
1649 * pch_gbe_clean_rx - Send received data up the network stack; legacy
1650 * @adapter: Board private structure
1651 * @rx_ring: Rx descriptor ring
1652 * @work_done: Completed count
1653 * @work_to_do: Request count
1654 * Returns
1655 * true: Cleaned the descriptor
1656 * false: Not cleaned the descriptor
1657 */
1658static bool
1659pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1660 struct pch_gbe_rx_ring *rx_ring,
1661 int *work_done, int work_to_do)
1662{
1663 struct net_device *netdev = adapter->netdev;
1664 struct pci_dev *pdev = adapter->pdev;
1665 struct pch_gbe_buffer *buffer_info;
1666 struct pch_gbe_rx_desc *rx_desc;
1667 u32 length;
77555ee7
MO
1668 unsigned int i;
1669 unsigned int cleaned_count = 0;
1670 bool cleaned = false;
124d770a 1671 struct sk_buff *skb;
77555ee7
MO
1672 u8 dma_status;
1673 u16 gbec_status;
1674 u32 tcp_ip_status;
77555ee7
MO
1675
1676 i = rx_ring->next_to_clean;
1677
1678 while (*work_done < work_to_do) {
1679 /* Check Rx descriptor status */
1680 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1681 if (rx_desc->gbec_status == DSC_INIT16)
1682 break;
1683 cleaned = true;
1684 cleaned_count++;
1685
1686 dma_status = rx_desc->dma_status;
1687 gbec_status = rx_desc->gbec_status;
1688 tcp_ip_status = rx_desc->tcp_ip_status;
1689 rx_desc->gbec_status = DSC_INIT16;
1690 buffer_info = &rx_ring->buffer_info[i];
1691 skb = buffer_info->skb;
124d770a 1692 buffer_info->skb = NULL;
77555ee7
MO
1693
1694 /* unmap dma */
1695 dma_unmap_single(&pdev->dev, buffer_info->dma,
1696 buffer_info->length, DMA_FROM_DEVICE);
1697 buffer_info->mapped = false;
77555ee7
MO
1698
1699 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1700 "TCP:0x%08x] BufInf = 0x%p\n",
1701 i, dma_status, gbec_status, tcp_ip_status,
1702 buffer_info);
1703 /* Error check */
1704 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1705 adapter->stats.rx_frame_errors++;
1706 pr_err("Receive Not Octal Error\n");
1707 } else if (unlikely(gbec_status &
1708 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1709 adapter->stats.rx_frame_errors++;
1710 pr_err("Receive Nibble Error\n");
1711 } else if (unlikely(gbec_status &
1712 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1713 adapter->stats.rx_crc_errors++;
1714 pr_err("Receive CRC Error\n");
1715 } else {
1716 /* get receive length */
124d770a
TO
1717 /* length convert[-3], length includes FCS length */
1718 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1719 if (rx_desc->rx_words_eob & 0x02)
1720 length = length - 4;
1721 /*
1722 * buffer_info->rx_buffer: [Header:14][payload]
1723 * skb->data: [Reserve:2][Header:14][payload]
1724 */
1725 memcpy(skb->data, buffer_info->rx_buffer, length);
1726
77555ee7
MO
1727 /* update status of driver */
1728 adapter->stats.rx_bytes += length;
1729 adapter->stats.rx_packets++;
1730 if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1731 adapter->stats.multicast++;
1732 /* Write meta date of skb */
1733 skb_put(skb, length);
1a0bdadb
TS
1734
1735#ifdef CONFIG_PCH_PTP
1736 pch_rx_timestamp(adapter, skb);
1737#endif
1738
77555ee7 1739 skb->protocol = eth_type_trans(skb, netdev);
5d05a04d 1740 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
77555ee7 1741 skb->ip_summed = CHECKSUM_NONE;
5d05a04d
TO
1742 else
1743 skb->ip_summed = CHECKSUM_UNNECESSARY;
1744
77555ee7
MO
1745 napi_gro_receive(&adapter->napi, skb);
1746 (*work_done)++;
1747 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1748 skb->ip_summed, length);
1749 }
77555ee7
MO
1750 /* return some buffers to hardware, one at a time is too slow */
1751 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1752 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1753 cleaned_count);
1754 cleaned_count = 0;
1755 }
1756 if (++i == rx_ring->count)
1757 i = 0;
1758 }
1759 rx_ring->next_to_clean = i;
1760 if (cleaned_count)
1761 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1762 return cleaned;
1763}
1764
1765/**
1766 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1767 * @adapter: Board private structure
1768 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1769 * Returns
1770 * 0: Successfully
1771 * Negative value: Failed
1772 */
1773int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1774 struct pch_gbe_tx_ring *tx_ring)
1775{
1776 struct pci_dev *pdev = adapter->pdev;
1777 struct pch_gbe_tx_desc *tx_desc;
1778 int size;
1779 int desNo;
1780
1781 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
89bf67f1 1782 tx_ring->buffer_info = vzalloc(size);
e404decb 1783 if (!tx_ring->buffer_info)
77555ee7 1784 return -ENOMEM;
77555ee7
MO
1785
1786 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1787
1788 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1789 &tx_ring->dma, GFP_KERNEL);
1790 if (!tx_ring->desc) {
1791 vfree(tx_ring->buffer_info);
1792 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1793 return -ENOMEM;
1794 }
1795 memset(tx_ring->desc, 0, tx_ring->size);
1796
1797 tx_ring->next_to_use = 0;
1798 tx_ring->next_to_clean = 0;
1799 spin_lock_init(&tx_ring->tx_lock);
1800
1801 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1802 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1803 tx_desc->gbec_status = DSC_INIT16;
1804 }
1805 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1806 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1807 tx_ring->desc, (unsigned long long)tx_ring->dma,
1808 tx_ring->next_to_clean, tx_ring->next_to_use);
1809 return 0;
1810}
1811
1812/**
1813 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1814 * @adapter: Board private structure
1815 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1816 * Returns
1817 * 0: Successfully
1818 * Negative value: Failed
1819 */
1820int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1821 struct pch_gbe_rx_ring *rx_ring)
1822{
1823 struct pci_dev *pdev = adapter->pdev;
1824 struct pch_gbe_rx_desc *rx_desc;
1825 int size;
1826 int desNo;
1827
1828 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
89bf67f1 1829 rx_ring->buffer_info = vzalloc(size);
e404decb 1830 if (!rx_ring->buffer_info)
77555ee7 1831 return -ENOMEM;
e404decb 1832
77555ee7
MO
1833 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1834 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1835 &rx_ring->dma, GFP_KERNEL);
1836
1837 if (!rx_ring->desc) {
1838 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1839 vfree(rx_ring->buffer_info);
1840 return -ENOMEM;
1841 }
1842 memset(rx_ring->desc, 0, rx_ring->size);
1843 rx_ring->next_to_clean = 0;
1844 rx_ring->next_to_use = 0;
1845 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1846 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1847 rx_desc->gbec_status = DSC_INIT16;
1848 }
1849 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1850 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1851 rx_ring->desc, (unsigned long long)rx_ring->dma,
1852 rx_ring->next_to_clean, rx_ring->next_to_use);
1853 return 0;
1854}
1855
1856/**
1857 * pch_gbe_free_tx_resources - Free Tx Resources
1858 * @adapter: Board private structure
1859 * @tx_ring: Tx descriptor ring for a specific queue
1860 */
1861void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1862 struct pch_gbe_tx_ring *tx_ring)
1863{
1864 struct pci_dev *pdev = adapter->pdev;
1865
1866 pch_gbe_clean_tx_ring(adapter, tx_ring);
1867 vfree(tx_ring->buffer_info);
1868 tx_ring->buffer_info = NULL;
1869 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1870 tx_ring->desc = NULL;
1871}
1872
1873/**
1874 * pch_gbe_free_rx_resources - Free Rx Resources
1875 * @adapter: Board private structure
1876 * @rx_ring: Ring to clean the resources from
1877 */
1878void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1879 struct pch_gbe_rx_ring *rx_ring)
1880{
1881 struct pci_dev *pdev = adapter->pdev;
1882
1883 pch_gbe_clean_rx_ring(adapter, rx_ring);
1884 vfree(rx_ring->buffer_info);
1885 rx_ring->buffer_info = NULL;
1886 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1887 rx_ring->desc = NULL;
1888}
1889
1890/**
1891 * pch_gbe_request_irq - Allocate an interrupt line
1892 * @adapter: Board private structure
1893 * Returns
1894 * 0: Successfully
1895 * Negative value: Failed
1896 */
1897static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1898{
1899 struct net_device *netdev = adapter->netdev;
1900 int err;
1901 int flags;
1902
1903 flags = IRQF_SHARED;
1904 adapter->have_msi = false;
1905 err = pci_enable_msi(adapter->pdev);
1906 pr_debug("call pci_enable_msi\n");
1907 if (err) {
1908 pr_debug("call pci_enable_msi - Error: %d\n", err);
1909 } else {
1910 flags = 0;
1911 adapter->have_msi = true;
1912 }
1913 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1914 flags, netdev->name, netdev);
1915 if (err)
1916 pr_err("Unable to allocate interrupt Error: %d\n", err);
1917 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1918 adapter->have_msi, flags, err);
1919 return err;
1920}
1921
1922
1923static void pch_gbe_set_multi(struct net_device *netdev);
1924/**
1925 * pch_gbe_up - Up GbE network device
1926 * @adapter: Board private structure
1927 * Returns
1928 * 0: Successfully
1929 * Negative value: Failed
1930 */
1931int pch_gbe_up(struct pch_gbe_adapter *adapter)
1932{
1933 struct net_device *netdev = adapter->netdev;
1934 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1935 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1936 int err;
1937
2b53d078
DH
1938 /* Ensure we have a valid MAC */
1939 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1940 pr_err("Error: Invalid MAC address\n");
1941 return -EINVAL;
1942 }
1943
77555ee7
MO
1944 /* hardware has been reset, we need to reload some things */
1945 pch_gbe_set_multi(netdev);
1946
1947 pch_gbe_setup_tctl(adapter);
1948 pch_gbe_configure_tx(adapter);
1949 pch_gbe_setup_rctl(adapter);
1950 pch_gbe_configure_rx(adapter);
1951
1952 err = pch_gbe_request_irq(adapter);
1953 if (err) {
1954 pr_err("Error: can't bring device up\n");
1955 return err;
1956 }
124d770a
TO
1957 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1958 if (err) {
1959 pr_err("Error: can't bring device up\n");
1960 return err;
1961 }
77555ee7
MO
1962 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1963 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1964 adapter->tx_queue_len = netdev->tx_queue_len;
5229d87e 1965 pch_gbe_start_receive(&adapter->hw);
77555ee7
MO
1966
1967 mod_timer(&adapter->watchdog_timer, jiffies);
1968
1969 napi_enable(&adapter->napi);
1970 pch_gbe_irq_enable(adapter);
1971 netif_start_queue(adapter->netdev);
1972
1973 return 0;
1974}
1975
1976/**
1977 * pch_gbe_down - Down GbE network device
1978 * @adapter: Board private structure
1979 */
1980void pch_gbe_down(struct pch_gbe_adapter *adapter)
1981{
1982 struct net_device *netdev = adapter->netdev;
124d770a 1983 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
77555ee7
MO
1984
1985 /* signal that we're down so the interrupt handler does not
1986 * reschedule our watchdog timer */
1987 napi_disable(&adapter->napi);
1988 atomic_set(&adapter->irq_sem, 0);
1989
1990 pch_gbe_irq_disable(adapter);
1991 pch_gbe_free_irq(adapter);
1992
1993 del_timer_sync(&adapter->watchdog_timer);
1994
1995 netdev->tx_queue_len = adapter->tx_queue_len;
1996 netif_carrier_off(netdev);
1997 netif_stop_queue(netdev);
1998
1999 pch_gbe_reset(adapter);
2000 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
2001 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
124d770a
TO
2002
2003 pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
2004 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
2005 rx_ring->rx_buff_pool_logic = 0;
2006 rx_ring->rx_buff_pool_size = 0;
2007 rx_ring->rx_buff_pool = NULL;
77555ee7
MO
2008}
2009
2010/**
2011 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2012 * @adapter: Board private structure to initialize
2013 * Returns
2014 * 0: Successfully
2015 * Negative value: Failed
2016 */
2017static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2018{
2019 struct pch_gbe_hw *hw = &adapter->hw;
2020 struct net_device *netdev = adapter->netdev;
2021
2022 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2023 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2024 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2025
2026 /* Initialize the hardware-specific values */
2027 if (pch_gbe_hal_setup_init_funcs(hw)) {
2028 pr_err("Hardware Initialization Failure\n");
2029 return -EIO;
2030 }
2031 if (pch_gbe_alloc_queues(adapter)) {
2032 pr_err("Unable to allocate memory for queues\n");
2033 return -ENOMEM;
2034 }
2035 spin_lock_init(&adapter->hw.miim_lock);
2036 spin_lock_init(&adapter->tx_queue_lock);
2037 spin_lock_init(&adapter->stats_lock);
2038 spin_lock_init(&adapter->ethtool_lock);
2039 atomic_set(&adapter->irq_sem, 0);
2040 pch_gbe_irq_disable(adapter);
2041
2042 pch_gbe_init_stats(adapter);
2043
2044 pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
2045 (u32) adapter->rx_buffer_len,
2046 hw->mac.min_frame_size, hw->mac.max_frame_size);
2047 return 0;
2048}
2049
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:	Network interface device structure
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 *
 * Allocates TX/RX descriptor resources, powers up the PHY and brings
 * the device up; unwinds in reverse order on any failure.
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

err_up:
	/* keep the PHY powered when a wake-up event is armed */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
2089
2090/**
2091 * pch_gbe_stop - Disables a network interface
2092 * @netdev: Network interface device structure
2093 * Returns
2094 * 0: Successfully
2095 */
2096static int pch_gbe_stop(struct net_device *netdev)
2097{
2098 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2099 struct pch_gbe_hw *hw = &adapter->hw;
2100
2101 pch_gbe_down(adapter);
2102 if (!adapter->wake_up_evt)
2103 pch_gbe_hal_power_down_phy(hw);
2104 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2105 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2106 return 0;
2107}
2108
2109/**
2110 * pch_gbe_xmit_frame - Packet transmitting start
2111 * @skb: Socket buffer structure
2112 * @netdev: Network interface device structure
2113 * Returns
2114 * - NETDEV_TX_OK: Normal end
2115 * - NETDEV_TX_BUSY: Error end
2116 */
2117static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2118{
2119 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2120 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2121 unsigned long flags;
2122
2123 if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
77555ee7
MO
2124 pr_err("Transfer length Error: skb len: %d > max: %d\n",
2125 skb->len, adapter->hw.mac.max_frame_size);
419c2046 2126 dev_kfree_skb_any(skb);
77555ee7
MO
2127 adapter->stats.tx_length_errors++;
2128 return NETDEV_TX_OK;
2129 }
2130 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
2131 /* Collision - tell upper layer to requeue */
2132 return NETDEV_TX_LOCKED;
2133 }
2134 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2135 netif_stop_queue(netdev);
2136 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2137 pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
2138 tx_ring->next_to_use, tx_ring->next_to_clean);
2139 return NETDEV_TX_BUSY;
2140 }
2141 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2142
2143 /* CRC,ITAG no support */
2144 pch_gbe_tx_queue(adapter, tx_ring, skb);
2145 return NETDEV_TX_OK;
2146}
2147
2148/**
2149 * pch_gbe_get_stats - Get System Network Statistics
2150 * @netdev: Network interface device structure
2151 * Returns: The current stats
2152 */
2153static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2154{
2155 /* only return the current stats */
2156 return &netdev->stats;
2157}
2158
/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:   Network interface device structure
 *
 * Programs the RX_MODE filter bits from netdev->flags, then (when
 * multicast filtering is active) packs the multicast list into a flat
 * array and loads it into the MAC address registers.
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* promiscuous: disable both address and multicast filters */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* more groups than MAR slots:
			 * all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* nothing to program when the multicast filter is disabled above */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
2216
2217/**
2218 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2219 * @netdev: Network interface device structure
2220 * @addr: Pointer to an address structure
2221 * Returns
2222 * 0: Successfully
2223 * -EADDRNOTAVAIL: Failed
2224 */
2225static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2226{
2227 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2228 struct sockaddr *skaddr = addr;
2229 int ret_val;
2230
2231 if (!is_valid_ether_addr(skaddr->sa_data)) {
2232 ret_val = -EADDRNOTAVAIL;
2233 } else {
2234 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2235 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2236 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2237 ret_val = 0;
2238 }
2239 pr_debug("ret_val : 0x%08x\n", ret_val);
2240 pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2241 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2242 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2243 ioread32(&adapter->hw.reg->mac_adr[0].high),
2244 ioread32(&adapter->hw.reg->mac_adr[0].low));
2245 return ret_val;
2246}
2247
/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev:   Network interface device structure
 * @new_mtu:  New value for maximum frame size
 * Returns
 *	0:		Successfully
 *	-EINVAL:	Failed
 */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;
	/* remembered so a failed restart can roll back */
	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
	int err;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
		(max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
	/* pick the smallest supported receive buffer that fits the frame */
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

	if (netif_running(netdev)) {
		/* restart with the new buffer length; on failure restore
		 * the old length and try to bring the device back up */
		pch_gbe_down(adapter);
		err = pch_gbe_up(adapter);
		if (err) {
			adapter->rx_buffer_len = old_rx_buffer_len;
			pch_gbe_up(adapter);
			return -ENOMEM;
		} else {
			netdev->mtu = new_mtu;
			adapter->hw.mac.max_frame_size = max_frame;
		}
	} else {
		pch_gbe_reset(adapter);
		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;
	}

	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}
2300
756a6b03
MM
2301/**
2302 * pch_gbe_set_features - Reset device after features changed
2303 * @netdev: Network interface device structure
2304 * @features: New features
2305 * Returns
2306 * 0: HW state updated successfully
2307 */
c8f44aff
MM
2308static int pch_gbe_set_features(struct net_device *netdev,
2309 netdev_features_t features)
756a6b03
MM
2310{
2311 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
c8f44aff 2312 netdev_features_t changed = features ^ netdev->features;
756a6b03
MM
2313
2314 if (!(changed & NETIF_F_RXCSUM))
2315 return 0;
2316
2317 if (netif_running(netdev))
2318 pch_gbe_reinit_locked(adapter);
2319 else
2320 pch_gbe_reset(adapter);
2321
2322 return 0;
2323}
2324
77555ee7
MO
/**
 * pch_gbe_ioctl - Controls register through a MII interface
 * @netdev:  Network interface device structure
 * @ifr:     Pointer to ifr structure
 * @cmd:     Control command
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	pr_debug("cmd : 0x%04x\n", cmd);

#ifdef CONFIG_PCH_PTP
	/* hardware time stamping configuration is handled by the PTP code */
	if (cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(netdev, ifr, cmd);
#endif

	/* everything else goes through the generic MII layer */
	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
2347
2348/**
2349 * pch_gbe_tx_timeout - Respond to a Tx Hang
2350 * @netdev: Network interface device structure
2351 */
2352static void pch_gbe_tx_timeout(struct net_device *netdev)
2353{
2354 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2355
2356 /* Do the reset outside of interrupt context */
2357 adapter->stats.tx_timeout_count++;
2358 schedule_work(&adapter->reset_task);
2359}
2360
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:    Pointer of polling device struct
 * @budget:  The maximum number of a packet
 * Returns
 *	false:  Exit the polling mode
 *	true:   Continue the polling mode
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;
	u32 int_en;

	pr_debug("budget : %d\n", budget);

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	/* TX ring not fully cleaned: claim the whole budget so NAPI
	 * schedules us again */
	if (!cleaned)
		work_done = budget;
	/* If no Tx and not enough Rx work done,
	 * exit the polling mode
	 */
	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {
		napi_complete(napi);
		/* receiver was stopped (rx_stop_flag set elsewhere on a RX
		 * FIFO error); restart it before re-enabling interrupts */
		if (adapter->rx_stop_flag) {
			adapter->rx_stop_flag = false;
			pch_gbe_start_receive(&adapter->hw);
		}
		pch_gbe_irq_enable(adapter);
	} else
		if (adapter->rx_stop_flag) {
			/* still polling: restart the receiver and unmask only
			 * the RX FIFO error interrupt */
			adapter->rx_stop_flag = false;
			pch_gbe_start_receive(&adapter->hw);
			int_en = ioread32(&adapter->hw.reg->INT_EN);
			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
				  &adapter->hw.reg->INT_EN);
		}

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2412
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev:  Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* run the interrupt handler with the device IRQ masked so it
	 * cannot fire concurrently */
	disable_irq(adapter->pdev->irq);
	pch_gbe_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
2427
/* net_device callbacks wired into the network stack */
static const struct net_device_ops pch_gbe_netdev_ops = {
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
#endif
};
2443
/* PCI AER callback: a bus error was detected on this device.
 * Quiesce the interface and ask the PCI core for a slot reset. */
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2457
/* PCI AER callback: the slot has been reset; re-enable the device,
 * power the PHY back up and reinitialize the MAC. */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2477
/* PCI AER callback: error recovery finished; bring the interface back
 * up (if it was running) and reattach it to the stack. */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2491
2492static int __pch_gbe_suspend(struct pci_dev *pdev)
2493{
2494 struct net_device *netdev = pci_get_drvdata(pdev);
2495 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2496 struct pch_gbe_hw *hw = &adapter->hw;
2497 u32 wufc = adapter->wake_up_evt;
2498 int retval = 0;
2499
2500 netif_device_detach(netdev);
2501 if (netif_running(netdev))
2502 pch_gbe_down(adapter);
2503 if (wufc) {
2504 pch_gbe_set_multi(netdev);
2505 pch_gbe_setup_rctl(adapter);
2506 pch_gbe_configure_rx(adapter);
2507 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2508 hw->mac.link_duplex);
2509 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2510 hw->mac.link_duplex);
2511 pch_gbe_mac_set_wol_event(hw, wufc);
2512 pci_disable_device(pdev);
2513 } else {
2514 pch_gbe_hal_power_down_phy(hw);
2515 pch_gbe_mac_set_wol_event(hw, wufc);
2516 pci_disable_device(pdev);
2517 }
2518 return retval;
2519}
2520
2521#ifdef CONFIG_PM
/* dev_pm_ops .suspend hook: delegate to the shared suspend path */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2528
2529static int pch_gbe_resume(struct device *device)
2530{
2531 struct pci_dev *pdev = to_pci_dev(device);
2532 struct net_device *netdev = pci_get_drvdata(pdev);
2533 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2534 struct pch_gbe_hw *hw = &adapter->hw;
2535 u32 err;
2536
2537 err = pci_enable_device(pdev);
2538 if (err) {
2539 pr_err("Cannot enable PCI device from suspend\n");
2540 return err;
2541 }
2542 pci_set_master(pdev);
2543 pch_gbe_hal_power_up_phy(hw);
2544 pch_gbe_reset(adapter);
2545 /* Clear wake on lan control and status */
2546 pch_gbe_mac_set_wol_event(hw, 0);
2547
2548 if (netif_running(netdev))
2549 pch_gbe_up(adapter);
2550 netif_device_attach(netdev);
2551
2552 return 0;
2553}
2554#endif /* CONFIG_PM */
2555
/* PCI .shutdown hook: suspend the device; when the system is powering
 * off, arm wake-from-D3 and drop the device into D3hot. */
static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2564
/* PCI .remove hook: tear down everything pch_gbe_probe() set up,
 * in reverse order. */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* make sure no queued reset work races with teardown */
	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2583
2584static int pch_gbe_probe(struct pci_dev *pdev,
2585 const struct pci_device_id *pci_id)
2586{
2587 struct net_device *netdev;
2588 struct pch_gbe_adapter *adapter;
2589 int ret;
2590
2591 ret = pci_enable_device(pdev);
2592 if (ret)
2593 return ret;
2594
2595 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2596 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2597 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2598 if (ret) {
2599 ret = pci_set_consistent_dma_mask(pdev,
2600 DMA_BIT_MASK(32));
2601 if (ret) {
2602 dev_err(&pdev->dev, "ERR: No usable DMA "
2603 "configuration, aborting\n");
2604 goto err_disable_device;
2605 }
2606 }
2607 }
2608
2609 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2610 if (ret) {
2611 dev_err(&pdev->dev,
2612 "ERR: Can't reserve PCI I/O and memory resources\n");
2613 goto err_disable_device;
2614 }
2615 pci_set_master(pdev);
2616
2617 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2618 if (!netdev) {
2619 ret = -ENOMEM;
77555ee7
MO
2620 goto err_release_pci;
2621 }
2622 SET_NETDEV_DEV(netdev, &pdev->dev);
2623
2624 pci_set_drvdata(pdev, netdev);
2625 adapter = netdev_priv(netdev);
2626 adapter->netdev = netdev;
2627 adapter->pdev = pdev;
2628 adapter->hw.back = adapter;
2629 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2630 if (!adapter->hw.reg) {
2631 ret = -EIO;
2632 dev_err(&pdev->dev, "Can't ioremap\n");
2633 goto err_free_netdev;
2634 }
2635
1a0bdadb
TS
2636#ifdef CONFIG_PCH_PTP
2637 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2638 PCI_DEVFN(12, 4));
2639 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2640 pr_err("Bad ptp filter\n");
2641 return -EINVAL;
2642 }
2643#endif
2644
77555ee7
MO
2645 netdev->netdev_ops = &pch_gbe_netdev_ops;
2646 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2647 netif_napi_add(netdev, &adapter->napi,
2648 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
756a6b03
MM
2649 netdev->hw_features = NETIF_F_RXCSUM |
2650 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2651 netdev->features = netdev->hw_features;
77555ee7
MO
2652 pch_gbe_set_ethtool_ops(netdev);
2653
98200ec2 2654 pch_gbe_mac_load_mac_addr(&adapter->hw);
77555ee7
MO
2655 pch_gbe_mac_reset_hw(&adapter->hw);
2656
2657 /* setup the private structure */
2658 ret = pch_gbe_sw_init(adapter);
2659 if (ret)
2660 goto err_iounmap;
2661
2662 /* Initialize PHY */
2663 ret = pch_gbe_init_phy(adapter);
2664 if (ret) {
2665 dev_err(&pdev->dev, "PHY initialize error\n");
2666 goto err_free_adapter;
2667 }
2668 pch_gbe_hal_get_bus_info(&adapter->hw);
2669
2670 /* Read the MAC address. and store to the private data */
2671 ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2672 if (ret) {
2673 dev_err(&pdev->dev, "MAC address Read Error\n");
2674 goto err_free_adapter;
2675 }
2676
2677 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2678 if (!is_valid_ether_addr(netdev->dev_addr)) {
2b53d078
DH
2679 /*
2680 * If the MAC is invalid (or just missing), display a warning
2681 * but do not abort setting up the device. pch_gbe_up will
2682 * prevent the interface from being brought up until a valid MAC
2683 * is set.
2684 */
2685 dev_err(&pdev->dev, "Invalid MAC address, "
2686 "interface disabled.\n");
77555ee7
MO
2687 }
2688 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2689 (unsigned long)adapter);
2690
2691 INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2692
2693 pch_gbe_check_options(adapter);
2694
77555ee7
MO
2695 /* initialize the wol settings based on the eeprom settings */
2696 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2697 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2698
2699 /* reset the hardware with the new settings */
2700 pch_gbe_reset(adapter);
2701
2702 ret = register_netdev(netdev);
2703 if (ret)
2704 goto err_free_adapter;
2705 /* tell the stack to leave us alone until pch_gbe_open() is called */
2706 netif_carrier_off(netdev);
2707 netif_stop_queue(netdev);
2708
1a0bdadb 2709 dev_dbg(&pdev->dev, "PCH Network Connection\n");
77555ee7
MO
2710
2711 device_set_wakeup_enable(&pdev->dev, 1);
2712 return 0;
2713
2714err_free_adapter:
2715 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2716 kfree(adapter->tx_ring);
2717 kfree(adapter->rx_ring);
2718err_iounmap:
2719 iounmap(adapter->hw.reg);
2720err_free_netdev:
2721 free_netdev(netdev);
2722err_release_pci:
2723 pci_release_regions(pdev);
2724err_disable_device:
2725 pci_disable_device(pdev);
2726 return ret;
2727}
2728
/* PCI IDs served by this driver: the Intel EG20T (IOH1) GbE and the
 * ROHM ML7223 / ML7831 companion-chip GbE controllers. */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};
2754
#ifdef CONFIG_PM
/* power-management callbacks: suspend/resume are reused for the
 * freeze/thaw and poweroff/restore (hibernation) transitions */
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif

/* PCI error-recovery (AER) callbacks */
static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};

/* driver registration record handed to the PCI core */
static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
2783
2784
2785static int __init pch_gbe_init_module(void)
2786{
2787 int ret;
2788
f7594d42 2789 ret = pci_register_driver(&pch_gbe_driver);
77555ee7
MO
2790 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2791 if (copybreak == 0) {
2792 pr_info("copybreak disabled\n");
2793 } else {
2794 pr_info("copybreak enabled for packets <= %u bytes\n",
2795 copybreak);
2796 }
2797 }
2798 return ret;
2799}
2800
/* Module exit point: detach the driver from the PCI core */
static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}
2805
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

/* runtime-tunable threshold below which received packets are copied
 * into a fresh skb instead of handing over the ring buffer */
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* pch_gbe_main.c */
This page took 0.29974 seconds and 5 git commands to generate.