pch_gbe: fix an issue where an unnecessary packet was received.
[deliverable/linux.git] / drivers / net / pch_gbe / pch_gbe_main.c
CommitLineData
77555ee7
MO
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
a1dcfcb7 3 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
77555ee7
MO
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_api.h"
70c71606 23#include <linux/prefetch.h>
77555ee7
MO
24
#define DRV_VERSION     "1.00"
const char pch_driver_version[] = DRV_VERSION;

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802		/* Pci device ID */
#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT	256
#define PCH_GBE_PCI_BAR			1

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM			0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013

#define PCH_GBE_TX_WEIGHT         64
#define PCH_GBE_RX_WEIGHT         64
#define PCH_GBE_RX_BUFFER_WRITE   16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII   \
	)

/* Ethertype field values */
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
#define PCH_GBE_FRAME_SIZE_2048         2048
#define PCH_GBE_FRAME_SIZE_4096         4096
#define PCH_GBE_FRAME_SIZE_8192         8192

/* Accessors for the i-th rx/tx descriptor of ring R. */
#define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
/* Number of free descriptors left in ring R (one slot kept as sentinel). */
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

/* Pause packet value */
#define	PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
#define	PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
#define	PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
#define	PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF

#define PCH_GBE_ETH_ALEN            6

/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 *   o RXT0   = Receiver Timer Interrupt (ring 0)
 *   o TXDW   = Transmit Descriptor Written Back
 *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
 *   o RXSEQ  = Receive Sequence Error
 *   o LSC    = Link Status Change
 */
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT |    \
	PCH_GBE_INT_RX_DSC_EMP   |    \
	PCH_GBE_INT_WOL_DET      |    \
	PCH_GBE_INT_TX_CMPLT          \
	)

/* Threshold below which received packets are copied into a fresh skb. */
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
			       int data);
98200ec2
TO
96
/**
 * pch_gbe_mac_load_mac_addr - Trigger a hardware reload of the MAC address
 * @hw: Pointer to the HW structure
 */
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
	/* Writing 1 to MAC_ADDR_LOAD starts the reload. */
	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
}
101
77555ee7
MO
102/**
103 * pch_gbe_mac_read_mac_addr - Read MAC address
104 * @hw: Pointer to the HW structure
105 * Returns
106 * 0: Successful.
107 */
108s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
109{
110 u32 adr1a, adr1b;
111
112 adr1a = ioread32(&hw->reg->mac_adr[0].high);
113 adr1b = ioread32(&hw->reg->mac_adr[0].low);
114
115 hw->mac.addr[0] = (u8)(adr1a & 0xFF);
116 hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
117 hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
118 hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
119 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
120 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
121
122 pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
123 return 0;
124}
125
/**
 * pch_gbe_wait_clr_bit - Busy-wait until the given bit clears in a register
 * @reg: Pointer of register
 * @bit: Bit mask to wait on
 */
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
	u32 tmp;
	/* wait busy: poll up to 1000 times, then give up with an error */
	tmp = 1000;
	while ((ioread32(reg) & bit) && --tmp)
		cpu_relax();
	if (!tmp)
		pr_err("Error: busy bit is not cleared\n");
}
/**
 * pch_gbe_mac_mar_set - Set MAC address register
 * @hw:     Pointer to the HW structure
 * @addr:   Pointer to the MAC address (6 bytes, network order)
 * @index:  MAC address array register
 *
 * The register must be masked (stopped) while it is rewritten; the
 * mask/write/unmask sequence below is order-sensitive.
 */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
{
	u32 mar_low, mar_high, adrmask;

	pr_debug("index : 0x%x\n", index);

	/*
	 * HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Stop the MAC Address of index. */
	adrmask = ioread32(&hw->reg->ADDR_MASK);
	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
	/* Set the MAC address to the MAC address 1A/1B register */
	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Start the MAC address of index */
	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
173
/**
 * pch_gbe_mac_reset_hw - Reset hardware
 * @hw: Pointer to the HW structure
 */
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
{
	/* Read the MAC address and store it to the private data, since the
	 * reset below wipes the address registers. */
	pch_gbe_mac_read_mac_addr(hw);
	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
#ifdef PCH_GBE_MAC_IFOP_RGMII
	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
#endif
	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
	/* Restore the receive address after the reset completes */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
	return;
}
191
/**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
 * @hw:         Pointer to the HW structure
 * @mar_count:  Number of receive address registers
 */
static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
{
	u32 i;

	/* Setup the receive address (entry 0 = our own MAC) */
	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	/* Zero out the other receive addresses */
	for (i = 1; i < mar_count; i++) {
		iowrite32(0, &hw->reg->mac_adr[i].high);
		iowrite32(0, &hw->reg->mac_adr[i].low);
	}
	/* Mask all entries except entry 0 (0xFFFE = all bits but bit 0). */
	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
	/* wait busy */
	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
213
214
/**
 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
 * @hw:              Pointer to the HW structure
 * @mc_addr_list:    Array of multicast addresses to program
 * @mc_addr_count:   Number of multicast addresses to program
 * @mar_used_count:  The first MAC Address register free to program
 * @mar_total_num:   Total number of supported MAC Address Registers
 */
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)
{
	u32 i, adrmask;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = mar_used_count; i < mar_total_num; i++) {
		if (mc_addr_count) {
			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			/* advance to the next 6-byte address */
			mc_addr_list += PCH_GBE_ETH_ALEN;
		} else {
			/* Clear MAC address mask (disable the entry) */
			adrmask = ioread32(&hw->reg->ADDR_MASK);
			iowrite32((adrmask | (0x0001 << i)),
				  &hw->reg->ADDR_MASK);
			/* wait busy */
			pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
			/* Clear MAC address */
			iowrite32(0, &hw->reg->mac_adr[i].high);
			iowrite32(0, &hw->reg->mac_adr[i].low);
		}
	}
}
251
/**
 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 * @hw: Pointer to the HW structure
 * Returns
 *	0:               Successful.
 *	Negative value:  Failed (unknown flow-control mode).
 */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
{
	struct pch_gbe_mac_info *mac = &hw->mac;
	u32 rx_fctrl;

	pr_debug("mac->fc = %u\n", mac->fc);

	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);

	/* RX pause is the FL_CTRL_EN hardware bit; TX pause is recorded in
	 * the software flag mac->tx_fc_enable. */
	switch (mac->fc) {
	case PCH_GBE_FC_NONE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_RX_PAUSE:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = false;
		break;
	case PCH_GBE_FC_TX_PAUSE:
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	case PCH_GBE_FC_FULL:
		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
		mac->tx_fc_enable = true;
		break;
	default:
		pr_err("Flow control param set incorrectly\n");
		return -EINVAL;
	}
	/* No RX flow control on half-duplex links. */
	if (mac->link_duplex == DUPLEX_HALF)
		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
	pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
	return 0;
}
296
/**
 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 * @hw:     Pointer to the HW structure
 * @wu_evt: Wake up event bits; 0 disables wake-on-lan entirely
 */
static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
{
	u32 addr_mask;

	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
		 wu_evt, ioread32(&hw->reg->ADDR_MASK));

	if (wu_evt) {
		/* Set Wake-On-Lan address mask (mirror the current mask) */
		addr_mask = ioread32(&hw->reg->ADDR_MASK);
		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
		/* wait busy */
		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
		/* Clear stale WOL status, then arm the requested events. */
		iowrite32(0, &hw->reg->WOL_ST);
		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
		iowrite32(0x02, &hw->reg->TCPIP_ACC);
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	} else {
		/* Disable WOL and clear any pending status. */
		iowrite32(0, &hw->reg->WOL_CTRL);
		iowrite32(0, &hw->reg->WOL_ST);
	}
	return;
}
325
/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw:   Pointer to the HW structure
 * @addr: Address of PHY
 * @dir:  Operation. (Write or Read)
 * @reg:  Access register of PHY
 * @data: Write data.
 *
 * Returns: Read data (0 on timeout; the interface cannot report errors).
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			  u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&hw->miim_lock, flags);

	/* Wait for any previous MIIM operation to finish (100 * 20us). */
	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	/* Start the operation, then poll until the READY bit comes back. */
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		  dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
371
372/**
373 * pch_gbe_mac_set_pause_packet - Set pause packet
374 * @hw: Pointer to the HW structure
375 */
191cc687 376static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
77555ee7
MO
377{
378 unsigned long tmp2, tmp3;
379
380 /* Set Pause packet */
381 tmp2 = hw->mac.addr[1];
382 tmp2 = (tmp2 << 8) | hw->mac.addr[0];
383 tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
384
385 tmp3 = hw->mac.addr[5];
386 tmp3 = (tmp3 << 8) | hw->mac.addr[4];
387 tmp3 = (tmp3 << 8) | hw->mac.addr[3];
388 tmp3 = (tmp3 << 8) | hw->mac.addr[2];
389
390 iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
391 iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
392 iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
393 iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
394 iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
395
396 /* Transmit Pause Packet */
397 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
398
399 pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
400 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
401 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
402 ioread32(&hw->reg->PAUSE_PKT5));
403
404 return;
405}
406
407
408/**
409 * pch_gbe_alloc_queues - Allocate memory for all rings
410 * @adapter: Board private structure to initialize
411 * Returns
412 * 0: Successfully
413 * Negative value: Failed
414 */
415static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
416{
417 int size;
418
419 size = (int)sizeof(struct pch_gbe_tx_ring);
420 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
421 if (!adapter->tx_ring)
422 return -ENOMEM;
423 size = (int)sizeof(struct pch_gbe_rx_ring);
424 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
425 if (!adapter->rx_ring) {
426 kfree(adapter->tx_ring);
427 return -ENOMEM;
428 }
429 return 0;
430}
431
/**
 * pch_gbe_init_stats - Initialize status
 * @adapter: Board private structure to initialize
 */
static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
{
	/* Zero the entire statistics block. */
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}
441
442/**
443 * pch_gbe_init_phy - Initialize PHY
444 * @adapter: Board private structure to initialize
445 * Returns
446 * 0: Successfully
447 * Negative value: Failed
448 */
449static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
450{
451 struct net_device *netdev = adapter->netdev;
452 u32 addr;
453 u16 bmcr, stat;
454
455 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
456 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
457 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
458 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
459 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
460 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
461 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
462 break;
463 }
464 adapter->hw.phy.addr = adapter->mii.phy_id;
465 pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
466 if (addr == 32)
467 return -EAGAIN;
468 /* Selected the phy and isolate the rest */
469 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
470 if (addr != adapter->mii.phy_id) {
471 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
472 BMCR_ISOLATE);
473 } else {
474 bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
475 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
476 bmcr & ~BMCR_ISOLATE);
477 }
478 }
479
480 /* MII setup */
481 adapter->mii.phy_id_mask = 0x1F;
482 adapter->mii.reg_num_mask = 0x1F;
483 adapter->mii.dev = adapter->netdev;
484 adapter->mii.mdio_read = pch_gbe_mdio_read;
485 adapter->mii.mdio_write = pch_gbe_mdio_write;
486 adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
487 return 0;
488}
489
/**
 * pch_gbe_mdio_read - The read function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID
 * @reg:    Access location
 * Returns: Register value read via the MIIM interface
 */
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
				     (u16) 0);
}
507
/**
 * pch_gbe_mdio_write - The write function for mii
 * @netdev: Network interface device structure
 * @addr:   Phy ID (not used)
 * @reg:    Access location
 * @data:   Write data
 */
static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
}
523
/**
 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work: Pointer of board private structure
 */
static void pch_gbe_reset_task(struct work_struct *work)
{
	struct pch_gbe_adapter *adapter;
	adapter = container_of(work, struct pch_gbe_adapter, reset_task);

	/* Hold rtnl so the down/up sequence can't race netdev operations. */
	rtnl_lock();
	pch_gbe_reinit_locked(adapter);
	rtnl_unlock();
}
537
/**
 * pch_gbe_reinit_locked- Re-initialization
 * @adapter: Board private structure
 *
 * Caller holds the rtnl lock (see pch_gbe_reset_task).
 */
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
	pch_gbe_down(adapter);
	pch_gbe_up(adapter);
}
547
/**
 * pch_gbe_reset - Reset GbE
 * @adapter: Board private structure
 */
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
	pch_gbe_mac_reset_hw(&adapter->hw);
	/* Setup the receive address. */
	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
	if (pch_gbe_hal_init_hw(&adapter->hw))
		pr_err("Hardware Error\n");
}
560
/**
 * pch_gbe_free_irq - Free an interrupt
 * @adapter: Board private structure
 */
static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->have_msi) {
		/* Disable MSI if it was enabled for this device. */
		pci_disable_msi(adapter->pdev);
		pr_debug("call pci_disable_msi\n");
	}
}
575
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	/* irq_sem counts nested disables; pch_gbe_irq_enable re-enables
	 * only when the count drops back to zero. */
	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	/* Read back status (presumably a posted-write flush — TODO confirm
	 * against the device datasheet). */
	ioread32(&hw->reg->INT_ST);
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
591
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	/* Only re-enable once every nested disable has been undone. */
	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
605
606
607
/**
 * pch_gbe_setup_tctl - configure the Transmit control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tx_mode, tcpip;

	tx_mode = PCH_GBE_TM_LONG_PKT |
		PCH_GBE_TM_ST_AND_FD |
		PCH_GBE_TM_SHORT_PKT |
		PCH_GBE_TM_TH_TX_STRT_8 |
		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;

	iowrite32(tx_mode, &hw->reg->TX_MODE);

	/* Enable TX checksum offload. */
	tcpip = ioread32(&hw->reg->TCPIP_ACC);
	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
630
/**
 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,
		 adapter->tx_ring->size);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = adapter->tx_ring->dma;
	/* size register holds ring size minus 0x10 (one descriptor). */
	tdlen = adapter->tx_ring->size - 0x10;
	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);

	/* Enables Transmission DMA */
	dctrl = ioread32(&hw->reg->DMA_CTRL);
	dctrl |= PCH_GBE_TX_DMA_EN;
	iowrite32(dctrl, &hw->reg->DMA_CTRL);
}
656
/**
 * pch_gbe_setup_rctl - Configure the receive control registers
 * @adapter: Board private structure
 */
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rx_mode, tcpip;

	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;

	iowrite32(rx_mode, &hw->reg->RX_MODE);

	tcpip = ioread32(&hw->reg->TCPIP_ACC);

	/* Mirror the netdev RX-checksum offload setting in hardware. */
	if (netdev->features & NETIF_F_RXCSUM) {
		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
		tcpip |= PCH_GBE_RX_TCPIPACC_EN;
	} else {
		tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
	}
	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
	return;
}
684
/**
 * pch_gbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: Board private structure
 */
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 rdba, rdlen, rctl, rxdma;

	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,
		 adapter->rx_ring->size);

	pch_gbe_mac_force_mac_fc(hw);

	/* Disables Receive MAC while the ring is reprogrammed */
	rctl = ioread32(&hw->reg->MAC_RX_EN);
	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);

	/* Disables Receive DMA */
	rxdma = ioread32(&hw->reg->DMA_CTRL);
	rxdma &= ~PCH_GBE_RX_DMA_EN;
	iowrite32(rxdma, &hw->reg->DMA_CTRL);

	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
		 ioread32(&hw->reg->MAC_RX_EN),
		 ioread32(&hw->reg->DMA_CTRL));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = adapter->rx_ring->dma;
	rdlen = adapter->rx_ring->size - 0x10;
	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
	/* software pointer starts at the end of the ring */
	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
}
721
722/**
723 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
724 * @adapter: Board private structure
725 * @buffer_info: Buffer information structure
726 */
727static void pch_gbe_unmap_and_free_tx_resource(
728 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
729{
730 if (buffer_info->mapped) {
731 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
732 buffer_info->length, DMA_TO_DEVICE);
733 buffer_info->mapped = false;
734 }
735 if (buffer_info->skb) {
736 dev_kfree_skb_any(buffer_info->skb);
737 buffer_info->skb = NULL;
738 }
739}
740
741/**
742 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
743 * @adapter: Board private structure
744 * @buffer_info: Buffer information structure
745 */
746static void pch_gbe_unmap_and_free_rx_resource(
747 struct pch_gbe_adapter *adapter,
748 struct pch_gbe_buffer *buffer_info)
749{
750 if (buffer_info->mapped) {
751 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
752 buffer_info->length, DMA_FROM_DEVICE);
753 buffer_info->mapped = false;
754 }
755 if (buffer_info->skb) {
756 dev_kfree_skb_any(buffer_info->skb);
757 buffer_info->skb = NULL;
758 }
759}
760
/**
 * pch_gbe_clean_tx_ring - Free Tx Buffers
 * @adapter: Board private structure
 * @tx_ring: Ring to be cleaned
 */
static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
				  struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	/* Reprogram the hardware descriptor pointer and ring size. */
	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
}
791
/**
 * pch_gbe_clean_rx_ring - Free Rx Buffers
 * @adapter: Board private structure
 * @rx_ring: Ring to free buffers from
 */
static void
pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
		      struct pch_gbe_rx_ring *rx_ring)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
	}
	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	/* Reprogram the hardware descriptor pointer and ring size. */
	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
}
822
823static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
824 u16 duplex)
825{
826 struct pch_gbe_hw *hw = &adapter->hw;
827 unsigned long rgmii = 0;
828
829 /* Set the RGMII control. */
830#ifdef PCH_GBE_MAC_IFOP_RGMII
831 switch (speed) {
832 case SPEED_10:
833 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
834 PCH_GBE_MAC_RGMII_CTRL_SETTING);
835 break;
836 case SPEED_100:
837 rgmii = (PCH_GBE_RGMII_RATE_25M |
838 PCH_GBE_MAC_RGMII_CTRL_SETTING);
839 break;
840 case SPEED_1000:
841 rgmii = (PCH_GBE_RGMII_RATE_125M |
842 PCH_GBE_MAC_RGMII_CTRL_SETTING);
843 break;
844 }
845 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
846#else /* GMII */
847 rgmii = 0;
848 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
849#endif
850}
851static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
852 u16 duplex)
853{
854 struct net_device *netdev = adapter->netdev;
855 struct pch_gbe_hw *hw = &adapter->hw;
856 unsigned long mode = 0;
857
858 /* Set the communication mode */
859 switch (speed) {
860 case SPEED_10:
861 mode = PCH_GBE_MODE_MII_ETHER;
862 netdev->tx_queue_len = 10;
863 break;
864 case SPEED_100:
865 mode = PCH_GBE_MODE_MII_ETHER;
866 netdev->tx_queue_len = 100;
867 break;
868 case SPEED_1000:
869 mode = PCH_GBE_MODE_GMII_ETHER;
870 break;
871 }
872 if (duplex == DUPLEX_FULL)
873 mode |= PCH_GBE_MODE_FULL_DUPLEX;
874 else
875 mode |= PCH_GBE_MODE_HALF_DUPLEX;
876 iowrite32(mode, &hw->reg->MODE);
877}
878
/**
 * pch_gbe_watchdog - Watchdog process
 * @data: Board private structure
 *
 * Runs every PCH_GBE_WATCHDOG_PERIOD: refreshes statistics and tracks link
 * state transitions, reprogramming speed/duplex when the link comes up.
 */
static void pch_gbe_watchdog(unsigned long data)
{
	struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_hw *hw = &adapter->hw;

	pr_debug("right now = %ld\n", jiffies);

	pch_gbe_update_stats(adapter);
	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
		/* Link just came up: read the negotiated parameters. */
		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
		netdev->tx_queue_len = adapter->tx_queue_len;
		/* mii library handles link maintenance tasks */
		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
			pr_err("ethtool get setting Error\n");
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies +
						PCH_GBE_WATCHDOG_PERIOD));
			return;
		}
		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
		hw->mac.link_duplex = cmd.duplex;
		/* Set the RGMII control. */
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		/* Set the communication mode */
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		netdev_dbg(netdev,
			   "Link is Up %d Mbps %s-Duplex\n",
			   hw->mac.link_speed,
			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else if ((!mii_link_ok(&adapter->mii)) &&
		   (netif_carrier_ok(netdev))) {
		/* Link just went down: fall back to conservative defaults. */
		netdev_dbg(netdev, "NIC Link is Down\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	mod_timer(&adapter->watchdog_timer,
		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
}
928
929/**
930 * pch_gbe_tx_queue - Carry out queuing of the transmission data
931 * @adapter: Board private structure
932 * @tx_ring: Tx descriptor ring structure
933 * @skb: Sockt buffer structure
934 */
935static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
936 struct pch_gbe_tx_ring *tx_ring,
937 struct sk_buff *skb)
938{
939 struct pch_gbe_hw *hw = &adapter->hw;
940 struct pch_gbe_tx_desc *tx_desc;
941 struct pch_gbe_buffer *buffer_info;
942 struct sk_buff *tmp_skb;
943 unsigned int frame_ctrl;
944 unsigned int ring_num;
945 unsigned long flags;
946
947 /*-- Set frame control --*/
948 frame_ctrl = 0;
949 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
950 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
756a6b03 951 if (skb->ip_summed == CHECKSUM_NONE)
77555ee7
MO
952 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
953
954 /* Performs checksum processing */
955 /*
956 * It is because the hardware accelerator does not support a checksum,
957 * when the received data size is less than 64 bytes.
958 */
756a6b03 959 if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
77555ee7
MO
960 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
961 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
962 if (skb->protocol == htons(ETH_P_IP)) {
963 struct iphdr *iph = ip_hdr(skb);
964 unsigned int offset;
965 iph->check = 0;
966 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
967 offset = skb_transport_offset(skb);
968 if (iph->protocol == IPPROTO_TCP) {
969 skb->csum = 0;
970 tcp_hdr(skb)->check = 0;
971 skb->csum = skb_checksum(skb, offset,
972 skb->len - offset, 0);
973 tcp_hdr(skb)->check =
974 csum_tcpudp_magic(iph->saddr,
975 iph->daddr,
976 skb->len - offset,
977 IPPROTO_TCP,
978 skb->csum);
979 } else if (iph->protocol == IPPROTO_UDP) {
980 skb->csum = 0;
981 udp_hdr(skb)->check = 0;
982 skb->csum =
983 skb_checksum(skb, offset,
984 skb->len - offset, 0);
985 udp_hdr(skb)->check =
986 csum_tcpudp_magic(iph->saddr,
987 iph->daddr,
988 skb->len - offset,
989 IPPROTO_UDP,
990 skb->csum);
991 }
992 }
993 }
994 spin_lock_irqsave(&tx_ring->tx_lock, flags);
995 ring_num = tx_ring->next_to_use;
996 if (unlikely((ring_num + 1) == tx_ring->count))
997 tx_ring->next_to_use = 0;
998 else
999 tx_ring->next_to_use = ring_num + 1;
1000
1001 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1002 buffer_info = &tx_ring->buffer_info[ring_num];
1003 tmp_skb = buffer_info->skb;
1004
1005 /* [Header:14][payload] ---> [Header:14][paddong:2][payload] */
1006 memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1007 tmp_skb->data[ETH_HLEN] = 0x00;
1008 tmp_skb->data[ETH_HLEN + 1] = 0x00;
1009 tmp_skb->len = skb->len;
1010 memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1011 (skb->len - ETH_HLEN));
25985edc 1012 /*-- Set Buffer information --*/
77555ee7
MO
1013 buffer_info->length = tmp_skb->len;
1014 buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1015 buffer_info->length,
1016 DMA_TO_DEVICE);
1017 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1018 pr_err("TX DMA map failed\n");
1019 buffer_info->dma = 0;
1020 buffer_info->time_stamp = 0;
1021 tx_ring->next_to_use = ring_num;
1022 return;
1023 }
1024 buffer_info->mapped = true;
1025 buffer_info->time_stamp = jiffies;
1026
1027 /*-- Set Tx descriptor --*/
1028 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1029 tx_desc->buffer_addr = (buffer_info->dma);
1030 tx_desc->length = (tmp_skb->len);
1031 tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1032 tx_desc->tx_frame_ctrl = (frame_ctrl);
1033 tx_desc->gbec_status = (DSC_INIT16);
1034
1035 if (unlikely(++ring_num == tx_ring->count))
1036 ring_num = 0;
1037
1038 /* Update software pointer of TX descriptor */
1039 iowrite32(tx_ring->dma +
1040 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1041 &hw->reg->TX_DSC_SW_P);
1042 dev_kfree_skb_any(skb);
1043}
1044
1045/**
1046 * pch_gbe_update_stats - Update the board statistics counters
1047 * @adapter: Board private structure
1048 */
1049void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1050{
1051 struct net_device *netdev = adapter->netdev;
1052 struct pci_dev *pdev = adapter->pdev;
1053 struct pch_gbe_hw_stats *stats = &adapter->stats;
1054 unsigned long flags;
1055
1056 /*
1057 * Prevent stats update while adapter is being reset, or if the pci
1058 * connection is down.
1059 */
1060 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1061 return;
1062
1063 spin_lock_irqsave(&adapter->stats_lock, flags);
1064
1065 /* Update device status "adapter->stats" */
1066 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1067 stats->tx_errors = stats->tx_length_errors +
1068 stats->tx_aborted_errors +
1069 stats->tx_carrier_errors + stats->tx_timeout_count;
1070
1071 /* Update network device status "adapter->net_stats" */
1072 netdev->stats.rx_packets = stats->rx_packets;
1073 netdev->stats.rx_bytes = stats->rx_bytes;
1074 netdev->stats.rx_dropped = stats->rx_dropped;
1075 netdev->stats.tx_packets = stats->tx_packets;
1076 netdev->stats.tx_bytes = stats->tx_bytes;
1077 netdev->stats.tx_dropped = stats->tx_dropped;
1078 /* Fill out the OS statistics structure */
1079 netdev->stats.multicast = stats->multicast;
1080 netdev->stats.collisions = stats->collisions;
1081 /* Rx Errors */
1082 netdev->stats.rx_errors = stats->rx_errors;
1083 netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1084 netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1085 /* Tx Errors */
1086 netdev->stats.tx_errors = stats->tx_errors;
1087 netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1088 netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1089
1090 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1091}
1092
5229d87e
TO
1093static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1094{
1095 u32 rxdma;
1096
1097 /* Enables Receive DMA */
1098 rxdma = ioread32(&hw->reg->DMA_CTRL);
1099 rxdma |= PCH_GBE_RX_DMA_EN;
1100 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1101 /* Enables Receive */
1102 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1103 return;
1104}
1105
/**
 * pch_gbe_intr - Interrupt Handler
 * @irq:   Interrupt number
 * @data:  Pointer to a network interface device structure
 * Returns
 *	- IRQ_HANDLED:	Our interrupt
 *	- IRQ_NONE:	Not our interrupt
 */
static irqreturn_t pch_gbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	u32 int_st;
	u32 int_en;

	/* Check request status: mask the raw status with the currently
	 * enabled causes so only interrupts we asked for are handled. */
	int_st = ioread32(&hw->reg->INT_ST);
	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* When request status is no interruption factor */
	if (unlikely(!int_st))
		return IRQ_NONE;	/* Not our interrupt. End processing. */
	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
	/* Count each error cause individually for diagnostics */
	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
		adapter->stats.intr_rx_frame_err_count++;
	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
		adapter->stats.intr_rx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
		adapter->stats.intr_rx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
		adapter->stats.intr_tx_fifo_err_count++;
	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
		adapter->stats.intr_tx_dma_err_count++;
	if (int_st & PCH_GBE_INT_TCPIP_ERR)
		adapter->stats.intr_tcpip_err_count++;
	/* When Rx descriptor is empty: disable the empty interrupt (it
	 * would fire continuously) and, if flow control is on, ask the
	 * link partner to pause. */
	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
		adapter->stats.intr_rx_dsc_empty_count++;
		pr_err("Rx descriptor is empty\n");
		int_en = ioread32(&hw->reg->INT_EN);
		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {
			/* Set Pause packet */
			pch_gbe_mac_set_pause_packet(hw);
		}
		/* If neither Rx nor Tx completion is enabled there is
		 * nothing left to poll for; finish here. */
		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
		    == 0) {
			return IRQ_HANDLED;
		}
	}

	/* When request status is Receive interruption: mask the completion
	 * interrupts and hand the work to NAPI (re-enabled in the poll). */
	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
		if (likely(napi_schedule_prep(&adapter->napi))) {
			/* Enable only Rx Descriptor empty */
			atomic_inc(&adapter->irq_sem);
			int_en = ioread32(&hw->reg->INT_EN);
			int_en &=
			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
			iowrite32(int_en, &hw->reg->INT_EN);
			/* Start polling for NAPI */
			__napi_schedule(&adapter->napi);
		}
	}
	pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
	return IRQ_HANDLED;
}
1174
/**
 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter:       Board private structure
 * @rx_ring:       Rx descriptor ring
 * @cleaned_count: Cleaned count
 *
 * Refills up to @cleaned_count descriptors starting at next_to_use:
 * reuses the buffer skb when clean_rx left it in place, otherwise
 * allocates a fresh one, DMA-maps it, and re-arms the descriptor.
 */
static void
pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pch_gbe_rx_desc *rx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz;

	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
	i = rx_ring->next_to_use;

	while ((cleaned_count--)) {
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;
		if (skb) {
			/* Buffer was not handed to the stack; reuse it */
			skb_trim(skb, 0);
		} else {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (unlikely(!skb)) {
				/* Better luck next round */
				adapter->stats.rx_alloc_buff_failed++;
				break;
			}
			/* 64byte align */
			skb_reserve(skb, PCH_GBE_DMA_ALIGN);

			buffer_info->skb = skb;
			buffer_info->length = adapter->rx_buffer_len;
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		buffer_info->mapped = true;
		/* Re-arm the descriptor: hand it back to the hardware */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = (buffer_info->dma);
		rx_desc->gbec_status = DSC_INIT16;

		pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,
			 buffer_info->length);

		if (unlikely(++i == rx_ring->count))
			i = 0;
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* Tell the hardware about the LAST descriptor we filled,
		 * hence the decrement (with wrap) before the write. */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
		iowrite32(rx_ring->dma +
			  (int)sizeof(struct pch_gbe_rx_desc) * i,
			  &hw->reg->RX_DSC_SW_P);
	}
	return;
}
1248
1249/**
1250 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1251 * @adapter: Board private structure
1252 * @tx_ring: Tx descriptor ring
1253 */
1254static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1255 struct pch_gbe_tx_ring *tx_ring)
1256{
1257 struct pch_gbe_buffer *buffer_info;
1258 struct sk_buff *skb;
1259 unsigned int i;
1260 unsigned int bufsz;
1261 struct pch_gbe_tx_desc *tx_desc;
1262
1263 bufsz =
1264 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1265
1266 for (i = 0; i < tx_ring->count; i++) {
1267 buffer_info = &tx_ring->buffer_info[i];
1268 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1269 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1270 buffer_info->skb = skb;
1271 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1272 tx_desc->gbec_status = (DSC_INIT16);
1273 }
1274 return;
1275}
1276
/**
 * pch_gbe_clean_tx - Reclaim resources after transmit completes
 * @adapter:  Board private structure
 * @tx_ring:  Tx descriptor ring
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 */
static bool
pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_tx_ring *tx_ring)
{
	struct pch_gbe_tx_desc *tx_desc;
	struct pch_gbe_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;

	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);

	i = tx_ring->next_to_clean;
	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
		 tx_desc->gbec_status, tx_desc->dma_status);

	/* A cleared DSC_INIT16 pattern means the hardware has written
	 * completion status back into gbec_status (see pch_gbe_tx_queue,
	 * which stamps DSC_INIT16 when handing a descriptor over). */
	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
		cleaned = true;
		buffer_info = &tx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* Classify the completion status; only collision and
		 * clean-complete count as successful transmissions. */
		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
			  ) {
			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
			  ) {
			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");
		} else if ((tx_desc->gbec_status &
			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
			pr_debug("Transfer Collision\n");
		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
			  ) {
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;
		}
		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
			buffer_info->mapped = false;
		}
		if (buffer_info->skb) {
			/* The bounce skb is kept and reused by the xmit
			 * path; just reset its length. */
			pr_debug("trim buffer_info->skb : %d\n", i);
			skb_trim(buffer_info->skb, 0);
		}
		tx_desc->gbec_status = DSC_INIT16;
		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
			break;
	}
	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
		 cleaned_count);
	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;
		pr_debug("Tx wake queue\n");
	}
	/* next_to_clean is also read by the xmit path, hence the lock */
	spin_lock(&adapter->tx_queue_lock);
	tx_ring->next_to_clean = i;
	spin_unlock(&adapter->tx_queue_lock);
	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
	return cleaned;
}
1365
/**
 * pch_gbe_clean_rx - Send received data up the network stack; legacy
 * @adapter:     Board private structure
 * @rx_ring:     Rx descriptor ring
 * @work_done:   Completed count
 * @work_to_do:  Request count
 * Returns
 *	true:  Cleaned the descriptor
 *	false: Not cleaned the descriptor
 *
 * Walks the Rx ring from next_to_clean, unmaps each completed buffer,
 * strips the hardware's 2-byte pad (inserted after the Ethernet header
 * when Rx checksum offload is on) and the FCS, then hands the frame to
 * NAPI GRO.  Buffers handed to the stack are replaced in batches via
 * pch_gbe_alloc_rx_buffers().
 */
static bool
pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
		 struct pch_gbe_rx_ring *rx_ring,
		 int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct pch_gbe_buffer *buffer_info;
	struct pch_gbe_rx_desc *rx_desc;
	u32 length;
	unsigned int i;
	unsigned int cleaned_count = 0;
	bool cleaned = false;
	struct sk_buff *skb, *new_skb;
	u8 dma_status;
	u16 gbec_status;
	u32 tcp_ip_status;

	i = rx_ring->next_to_clean;

	while (*work_done < work_to_do) {
		/* Check Rx descriptor status */
		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
		if (rx_desc->gbec_status == DSC_INIT16)
			break;	/* still owned by the hardware */
		cleaned = true;
		cleaned_count++;

		/* Snapshot the status words, then immediately re-arm the
		 * descriptor's ownership pattern. */
		dma_status = rx_desc->dma_status;
		gbec_status = rx_desc->gbec_status;
		tcp_ip_status = rx_desc->tcp_ip_status;
		rx_desc->gbec_status = DSC_INIT16;
		buffer_info = &rx_ring->buffer_info[i];
		skb = buffer_info->skb;

		/* unmap dma */
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->mapped = false;
		/* Prefetch the packet */
		prefetch(skb->data);

		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x]  BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,
			 buffer_info);
		/* Error check: errored frames are counted and dropped
		 * (the buffer skb stays in place for reuse). */
		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");
		} else if (unlikely(gbec_status &
				    PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");
		} else {
			/* get receive length */
			/* length convert[-3] */
			length = (rx_desc->rx_words_eob) - 3;

			/* Decide the data conversion method */
			if (!(netdev->features & NETIF_F_RXCSUM)) {
				/* [Header:14][payload] */
				if (NET_IP_ALIGN) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb.*/
					new_skb = netdev_alloc_skb(netdev,
						   length + NET_IP_ALIGN);
					if (!new_skb) {
						/* dorrop error */
						pr_err("New skb allocation "
						       "Error\n");
						goto dorrop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       length);
					skb = new_skb;
				} else {
					/* DMA buffer is used as SKB as it is.*/
					buffer_info->skb = NULL;
				}
			} else {
				/* [Header:14][padding:2][payload] */
				/* The length includes padding length */
				length = length - PCH_GBE_DMA_PADDING;
				if ((length < copybreak) ||
				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
					/* Because alignment differs,
					 * the new_skb is newly allocated,
					 * and data is copied to new_skb.
					 * Padding data is deleted
					 * at the time of a copy.*/
					new_skb = netdev_alloc_skb(netdev,
						   length + NET_IP_ALIGN);
					if (!new_skb) {
						/* dorrop error */
						pr_err("New skb allocation "
						       "Error\n");
						goto dorrop;
					}
					skb_reserve(new_skb, NET_IP_ALIGN);
					memcpy(new_skb->data, skb->data,
					       ETH_HLEN);
					memcpy(&new_skb->data[ETH_HLEN],
					       &skb->data[ETH_HLEN +
							  PCH_GBE_DMA_PADDING],
					       length - ETH_HLEN);
					skb = new_skb;
				} else {
					/* Padding data is deleted
					 * by moving header data.*/
					memmove(&skb->data[PCH_GBE_DMA_PADDING],
						&skb->data[0], ETH_HLEN);
					skb_reserve(skb, NET_IP_ALIGN);
					buffer_info->skb = NULL;
				}
			}
			/* The length includes FCS length */
			length = length - ETH_FCS_LEN;
			/* update status of driver */
			adapter->stats.rx_bytes += length;
			adapter->stats.rx_packets++;
			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
				adapter->stats.multicast++;
			/* Write meta date of skb */
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);
			/* NOTE(review): TCPIPOK -> CHECKSUM_NONE looks
			 * inverted at first glance; confirm the bit
			 * semantics against the PCH GbE datasheet. */
			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
				skb->ip_summed = CHECKSUM_NONE;
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&adapter->napi, skb);
			(*work_done)++;
			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
		}
dorrop:
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}
		if (++i == rx_ring->count)
			i = 0;
	}
	rx_ring->next_to_clean = i;
	if (cleaned_count)
		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	return cleaned;
}
1533
1534/**
1535 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1536 * @adapter: Board private structure
1537 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1538 * Returns
1539 * 0: Successfully
1540 * Negative value: Failed
1541 */
1542int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1543 struct pch_gbe_tx_ring *tx_ring)
1544{
1545 struct pci_dev *pdev = adapter->pdev;
1546 struct pch_gbe_tx_desc *tx_desc;
1547 int size;
1548 int desNo;
1549
1550 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
89bf67f1 1551 tx_ring->buffer_info = vzalloc(size);
77555ee7 1552 if (!tx_ring->buffer_info) {
25985edc 1553 pr_err("Unable to allocate memory for the buffer information\n");
77555ee7
MO
1554 return -ENOMEM;
1555 }
77555ee7
MO
1556
1557 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1558
1559 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1560 &tx_ring->dma, GFP_KERNEL);
1561 if (!tx_ring->desc) {
1562 vfree(tx_ring->buffer_info);
1563 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1564 return -ENOMEM;
1565 }
1566 memset(tx_ring->desc, 0, tx_ring->size);
1567
1568 tx_ring->next_to_use = 0;
1569 tx_ring->next_to_clean = 0;
1570 spin_lock_init(&tx_ring->tx_lock);
1571
1572 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1573 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1574 tx_desc->gbec_status = DSC_INIT16;
1575 }
1576 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1577 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1578 tx_ring->desc, (unsigned long long)tx_ring->dma,
1579 tx_ring->next_to_clean, tx_ring->next_to_use);
1580 return 0;
1581}
1582
1583/**
1584 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1585 * @adapter: Board private structure
1586 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1587 * Returns
1588 * 0: Successfully
1589 * Negative value: Failed
1590 */
1591int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1592 struct pch_gbe_rx_ring *rx_ring)
1593{
1594 struct pci_dev *pdev = adapter->pdev;
1595 struct pch_gbe_rx_desc *rx_desc;
1596 int size;
1597 int desNo;
1598
1599 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
89bf67f1 1600 rx_ring->buffer_info = vzalloc(size);
77555ee7
MO
1601 if (!rx_ring->buffer_info) {
1602 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1603 return -ENOMEM;
1604 }
77555ee7
MO
1605 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1606 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1607 &rx_ring->dma, GFP_KERNEL);
1608
1609 if (!rx_ring->desc) {
1610 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1611 vfree(rx_ring->buffer_info);
1612 return -ENOMEM;
1613 }
1614 memset(rx_ring->desc, 0, rx_ring->size);
1615 rx_ring->next_to_clean = 0;
1616 rx_ring->next_to_use = 0;
1617 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1618 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1619 rx_desc->gbec_status = DSC_INIT16;
1620 }
1621 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1622 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1623 rx_ring->desc, (unsigned long long)rx_ring->dma,
1624 rx_ring->next_to_clean, rx_ring->next_to_use);
1625 return 0;
1626}
1627
1628/**
1629 * pch_gbe_free_tx_resources - Free Tx Resources
1630 * @adapter: Board private structure
1631 * @tx_ring: Tx descriptor ring for a specific queue
1632 */
1633void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1634 struct pch_gbe_tx_ring *tx_ring)
1635{
1636 struct pci_dev *pdev = adapter->pdev;
1637
1638 pch_gbe_clean_tx_ring(adapter, tx_ring);
1639 vfree(tx_ring->buffer_info);
1640 tx_ring->buffer_info = NULL;
1641 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1642 tx_ring->desc = NULL;
1643}
1644
1645/**
1646 * pch_gbe_free_rx_resources - Free Rx Resources
1647 * @adapter: Board private structure
1648 * @rx_ring: Ring to clean the resources from
1649 */
1650void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1651 struct pch_gbe_rx_ring *rx_ring)
1652{
1653 struct pci_dev *pdev = adapter->pdev;
1654
1655 pch_gbe_clean_rx_ring(adapter, rx_ring);
1656 vfree(rx_ring->buffer_info);
1657 rx_ring->buffer_info = NULL;
1658 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1659 rx_ring->desc = NULL;
1660}
1661
1662/**
1663 * pch_gbe_request_irq - Allocate an interrupt line
1664 * @adapter: Board private structure
1665 * Returns
1666 * 0: Successfully
1667 * Negative value: Failed
1668 */
1669static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1670{
1671 struct net_device *netdev = adapter->netdev;
1672 int err;
1673 int flags;
1674
1675 flags = IRQF_SHARED;
1676 adapter->have_msi = false;
1677 err = pci_enable_msi(adapter->pdev);
1678 pr_debug("call pci_enable_msi\n");
1679 if (err) {
1680 pr_debug("call pci_enable_msi - Error: %d\n", err);
1681 } else {
1682 flags = 0;
1683 adapter->have_msi = true;
1684 }
1685 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1686 flags, netdev->name, netdev);
1687 if (err)
1688 pr_err("Unable to allocate interrupt Error: %d\n", err);
1689 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1690 adapter->have_msi, flags, err);
1691 return err;
1692}
1693
1694
1695static void pch_gbe_set_multi(struct net_device *netdev);
/**
 * pch_gbe_up - Up GbE network device
 * @adapter:  Board private structure
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
	/* Populate both rings with buffers before the receiver is
	 * started so the hardware never sees an empty ring. */
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	/* Save the queue length so pch_gbe_down() can restore it */
	adapter->tx_queue_len = netdev->tx_queue_len;
	pch_gbe_start_receive(&adapter->hw);

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}
1736
/**
 * pch_gbe_down - Down GbE network device
 * @adapter:  Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* Restore the queue length saved in pch_gbe_up() */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* Reset the hardware before tearing down the rings so no DMA
	 * is in flight while buffers are being freed. */
	pch_gbe_reset(adapter);
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
}
1763
1764/**
1765 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
1766 * @adapter: Board private structure to initialize
1767 * Returns
1768 * 0: Successfully
1769 * Negative value: Failed
1770 */
1771static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1772{
1773 struct pch_gbe_hw *hw = &adapter->hw;
1774 struct net_device *netdev = adapter->netdev;
1775
1776 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1777 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1778 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1779
1780 /* Initialize the hardware-specific values */
1781 if (pch_gbe_hal_setup_init_funcs(hw)) {
1782 pr_err("Hardware Initialization Failure\n");
1783 return -EIO;
1784 }
1785 if (pch_gbe_alloc_queues(adapter)) {
1786 pr_err("Unable to allocate memory for queues\n");
1787 return -ENOMEM;
1788 }
1789 spin_lock_init(&adapter->hw.miim_lock);
1790 spin_lock_init(&adapter->tx_queue_lock);
1791 spin_lock_init(&adapter->stats_lock);
1792 spin_lock_init(&adapter->ethtool_lock);
1793 atomic_set(&adapter->irq_sem, 0);
1794 pch_gbe_irq_disable(adapter);
1795
1796 pch_gbe_init_stats(adapter);
1797
1798 pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
1799 (u32) adapter->rx_buffer_len,
1800 hw->mac.min_frame_size, hw->mac.max_frame_size);
1801 return 0;
1802}
1803
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev:  Network interface device structure
 * Returns
 *	0:		Successfully
 *	Negative value:	Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

	/* Unwind in strict reverse order of the setup above */
err_up:
	/* Keep the PHY powered if it must stay up for wake-on-LAN */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
1843
1844/**
1845 * pch_gbe_stop - Disables a network interface
1846 * @netdev: Network interface device structure
1847 * Returns
1848 * 0: Successfully
1849 */
1850static int pch_gbe_stop(struct net_device *netdev)
1851{
1852 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1853 struct pch_gbe_hw *hw = &adapter->hw;
1854
1855 pch_gbe_down(adapter);
1856 if (!adapter->wake_up_evt)
1857 pch_gbe_hal_power_down_phy(hw);
1858 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
1859 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
1860 return 0;
1861}
1862
/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns
 *	- NETDEV_TX_OK:    Normal end
 *	- NETDEV_TX_BUSY:  Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	/* NOTE(review): the "- 4" presumably reserves room for the FCS
	 * within max_frame_size — confirm against the datasheet. */
	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		/* Ring full: stop the queue; pch_gbe_clean_tx() wakes it
		 * again once descriptors are reclaimed. */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}
1901
1902/**
1903 * pch_gbe_get_stats - Get System Network Statistics
1904 * @netdev: Network interface device structure
1905 * Returns: The current stats
1906 */
1907static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
1908{
1909 /* only return the current stats */
1910 return &netdev->stats;
1911}
1912
/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev:  Network interface device structure
 *
 * Programs the RX_MODE filter bits from netdev->flags and, when the
 * group count fits the hardware table, uploads the packed multicast
 * address list.
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* Promiscuous: disable both address and multicast filters */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* Too many groups: the multicast filter was disabled above, so
	 * there is no per-address table to program. */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	/* mc_count < PCH_GBE_MAR_ENTRIES here, so the product is small */
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
1970
1971/**
1972 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
1973 * @netdev: Network interface device structure
1974 * @addr: Pointer to an address structure
1975 * Returns
1976 * 0: Successfully
1977 * -EADDRNOTAVAIL: Failed
1978 */
1979static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
1980{
1981 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1982 struct sockaddr *skaddr = addr;
1983 int ret_val;
1984
1985 if (!is_valid_ether_addr(skaddr->sa_data)) {
1986 ret_val = -EADDRNOTAVAIL;
1987 } else {
1988 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1989 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
1990 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1991 ret_val = 0;
1992 }
1993 pr_debug("ret_val : 0x%08x\n", ret_val);
1994 pr_debug("dev_addr : %pM\n", netdev->dev_addr);
1995 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
1996 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
1997 ioread32(&adapter->hw.reg->mac_adr[0].high),
1998 ioread32(&adapter->hw.reg->mac_adr[0].low));
1999 return ret_val;
2000}
2001
2002/**
2003 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2004 * @netdev: Network interface device structure
2005 * @new_mtu: New value for maximum frame size
2006 * Returns
2007 * 0: Successfully
2008 * -EINVAL: Failed
2009 */
2010static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2011{
2012 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2013 int max_frame;
2014
2015 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2016 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2017 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2018 pr_err("Invalid MTU setting\n");
2019 return -EINVAL;
2020 }
2021 if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2022 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2023 else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2024 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2025 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2026 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2027 else
2028 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
2029 netdev->mtu = new_mtu;
2030 adapter->hw.mac.max_frame_size = max_frame;
2031
2032 if (netif_running(netdev))
2033 pch_gbe_reinit_locked(adapter);
2034 else
2035 pch_gbe_reset(adapter);
2036
2037 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2038 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2039 adapter->hw.mac.max_frame_size);
2040 return 0;
2041}
2042
756a6b03
MM
2043/**
2044 * pch_gbe_set_features - Reset device after features changed
2045 * @netdev: Network interface device structure
2046 * @features: New features
2047 * Returns
2048 * 0: HW state updated successfully
2049 */
2050static int pch_gbe_set_features(struct net_device *netdev, u32 features)
2051{
2052 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2053 u32 changed = features ^ netdev->features;
2054
2055 if (!(changed & NETIF_F_RXCSUM))
2056 return 0;
2057
2058 if (netif_running(netdev))
2059 pch_gbe_reinit_locked(adapter);
2060 else
2061 pch_gbe_reset(adapter);
2062
2063 return 0;
2064}
2065
77555ee7
MO
2066/**
2067 * pch_gbe_ioctl - Controls register through a MII interface
2068 * @netdev: Network interface device structure
2069 * @ifr: Pointer to ifr structure
2070 * @cmd: Control command
2071 * Returns
2072 * 0: Successfully
2073 * Negative value: Failed
2074 */
2075static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2076{
2077 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2078
2079 pr_debug("cmd : 0x%04x\n", cmd);
2080
2081 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2082}
2083
2084/**
2085 * pch_gbe_tx_timeout - Respond to a Tx Hang
2086 * @netdev: Network interface device structure
2087 */
2088static void pch_gbe_tx_timeout(struct net_device *netdev)
2089{
2090 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2091
2092 /* Do the reset outside of interrupt context */
2093 adapter->stats.tx_timeout_count++;
2094 schedule_work(&adapter->reset_task);
2095}
2096
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi:   Pointer of polling device struct
 * @budget: The maximum number of a packet
 * Returns
 *	false: Exit the polling mode
 *	true:  Continue the polling mode
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev)) {
		/* No carrier: nothing to clean, just leave polling mode. */
		poll_end_flag = true;
	} else {
		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);

		/* Tx descriptors were reclaimed: report the full budget so
		 * NAPI keeps us scheduled for another pass.
		 */
		if (cleaned)
			work_done = budget;
		/* If no Tx and not enough Rx work done,
		 * exit the polling mode
		 */
		if ((work_done < budget) || !netif_running(netdev))
			poll_end_flag = true;
	}

	if (poll_end_flag) {
		/* Done polling: hand back to interrupt-driven operation. */
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2142
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * pch_gbe_netpoll - Used by things like netconsole to send skbs
 * @netdev: Network interface device structure
 */
static void pch_gbe_netpoll(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int irq = adapter->pdev->irq;

	/* Run the interrupt handler by hand with the line masked */
	disable_irq(irq);
	pch_gbe_intr(irq, netdev);
	enable_irq(irq);
}
#endif
2157
2158static const struct net_device_ops pch_gbe_netdev_ops = {
2159 .ndo_open = pch_gbe_open,
2160 .ndo_stop = pch_gbe_stop,
2161 .ndo_start_xmit = pch_gbe_xmit_frame,
2162 .ndo_get_stats = pch_gbe_get_stats,
2163 .ndo_set_mac_address = pch_gbe_set_mac,
2164 .ndo_tx_timeout = pch_gbe_tx_timeout,
2165 .ndo_change_mtu = pch_gbe_change_mtu,
756a6b03 2166 .ndo_set_features = pch_gbe_set_features,
77555ee7
MO
2167 .ndo_do_ioctl = pch_gbe_ioctl,
2168 .ndo_set_multicast_list = &pch_gbe_set_multi,
2169#ifdef CONFIG_NET_POLL_CONTROLLER
2170 .ndo_poll_controller = pch_gbe_netpoll,
2171#endif
2172};
2173
/* PCI error recovery: a fatal bus error was detected.  Detach the
 * interface, stop traffic, and ask the PCI core for a slot reset.
 */
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						   pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2187
/* PCI error recovery: the slot has been reset.  Re-enable the device,
 * bring the PHY and MAC back to a sane state, and report recovery.
 */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2207
/* PCI error recovery: the device is functional again; restart the
 * interface (if it was running) and re-attach it.
 */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2221
2222static int __pch_gbe_suspend(struct pci_dev *pdev)
2223{
2224 struct net_device *netdev = pci_get_drvdata(pdev);
2225 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2226 struct pch_gbe_hw *hw = &adapter->hw;
2227 u32 wufc = adapter->wake_up_evt;
2228 int retval = 0;
2229
2230 netif_device_detach(netdev);
2231 if (netif_running(netdev))
2232 pch_gbe_down(adapter);
2233 if (wufc) {
2234 pch_gbe_set_multi(netdev);
2235 pch_gbe_setup_rctl(adapter);
2236 pch_gbe_configure_rx(adapter);
2237 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2238 hw->mac.link_duplex);
2239 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2240 hw->mac.link_duplex);
2241 pch_gbe_mac_set_wol_event(hw, wufc);
2242 pci_disable_device(pdev);
2243 } else {
2244 pch_gbe_hal_power_down_phy(hw);
2245 pch_gbe_mac_set_wol_event(hw, wufc);
2246 pci_disable_device(pdev);
2247 }
2248 return retval;
2249}
2250
2251#ifdef CONFIG_PM
2252static int pch_gbe_suspend(struct device *device)
2253{
2254 struct pci_dev *pdev = to_pci_dev(device);
2255
2256 return __pch_gbe_suspend(pdev);
2257}
2258
2259static int pch_gbe_resume(struct device *device)
2260{
2261 struct pci_dev *pdev = to_pci_dev(device);
2262 struct net_device *netdev = pci_get_drvdata(pdev);
2263 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2264 struct pch_gbe_hw *hw = &adapter->hw;
2265 u32 err;
2266
2267 err = pci_enable_device(pdev);
2268 if (err) {
2269 pr_err("Cannot enable PCI device from suspend\n");
2270 return err;
2271 }
2272 pci_set_master(pdev);
2273 pch_gbe_hal_power_up_phy(hw);
2274 pch_gbe_reset(adapter);
2275 /* Clear wake on lan control and status */
2276 pch_gbe_mac_set_wol_event(hw, 0);
2277
2278 if (netif_running(netdev))
2279 pch_gbe_up(adapter);
2280 netif_device_attach(netdev);
2281
2282 return 0;
2283}
2284#endif /* CONFIG_PM */
2285
/* .shutdown hook: run the common suspend path; when the system is
 * powering off, arm PCI wake and drop the device into D3hot so
 * wake-on-LAN (if configured) keeps working.
 */
static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2294
/* Device teardown, reverse of pch_gbe_probe().  Ordering matters:
 * cancel deferred work before unregistering so reset_task cannot run
 * against a vanishing netdev, and free resources only afterwards.
 */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->reset_task);
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2313
2314static int pch_gbe_probe(struct pci_dev *pdev,
2315 const struct pci_device_id *pci_id)
2316{
2317 struct net_device *netdev;
2318 struct pch_gbe_adapter *adapter;
2319 int ret;
2320
2321 ret = pci_enable_device(pdev);
2322 if (ret)
2323 return ret;
2324
2325 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2326 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2327 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2328 if (ret) {
2329 ret = pci_set_consistent_dma_mask(pdev,
2330 DMA_BIT_MASK(32));
2331 if (ret) {
2332 dev_err(&pdev->dev, "ERR: No usable DMA "
2333 "configuration, aborting\n");
2334 goto err_disable_device;
2335 }
2336 }
2337 }
2338
2339 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2340 if (ret) {
2341 dev_err(&pdev->dev,
2342 "ERR: Can't reserve PCI I/O and memory resources\n");
2343 goto err_disable_device;
2344 }
2345 pci_set_master(pdev);
2346
2347 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2348 if (!netdev) {
2349 ret = -ENOMEM;
2350 dev_err(&pdev->dev,
2351 "ERR: Can't allocate and set up an Ethernet device\n");
2352 goto err_release_pci;
2353 }
2354 SET_NETDEV_DEV(netdev, &pdev->dev);
2355
2356 pci_set_drvdata(pdev, netdev);
2357 adapter = netdev_priv(netdev);
2358 adapter->netdev = netdev;
2359 adapter->pdev = pdev;
2360 adapter->hw.back = adapter;
2361 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2362 if (!adapter->hw.reg) {
2363 ret = -EIO;
2364 dev_err(&pdev->dev, "Can't ioremap\n");
2365 goto err_free_netdev;
2366 }
2367
2368 netdev->netdev_ops = &pch_gbe_netdev_ops;
2369 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2370 netif_napi_add(netdev, &adapter->napi,
2371 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
756a6b03
MM
2372 netdev->hw_features = NETIF_F_RXCSUM |
2373 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2374 netdev->features = netdev->hw_features;
77555ee7
MO
2375 pch_gbe_set_ethtool_ops(netdev);
2376
98200ec2 2377 pch_gbe_mac_load_mac_addr(&adapter->hw);
77555ee7
MO
2378 pch_gbe_mac_reset_hw(&adapter->hw);
2379
2380 /* setup the private structure */
2381 ret = pch_gbe_sw_init(adapter);
2382 if (ret)
2383 goto err_iounmap;
2384
2385 /* Initialize PHY */
2386 ret = pch_gbe_init_phy(adapter);
2387 if (ret) {
2388 dev_err(&pdev->dev, "PHY initialize error\n");
2389 goto err_free_adapter;
2390 }
2391 pch_gbe_hal_get_bus_info(&adapter->hw);
2392
2393 /* Read the MAC address. and store to the private data */
2394 ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2395 if (ret) {
2396 dev_err(&pdev->dev, "MAC address Read Error\n");
2397 goto err_free_adapter;
2398 }
2399
2400 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2401 if (!is_valid_ether_addr(netdev->dev_addr)) {
2402 dev_err(&pdev->dev, "Invalid MAC Address\n");
2403 ret = -EIO;
2404 goto err_free_adapter;
2405 }
2406 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2407 (unsigned long)adapter);
2408
2409 INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2410
2411 pch_gbe_check_options(adapter);
2412
77555ee7
MO
2413 /* initialize the wol settings based on the eeprom settings */
2414 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2415 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2416
2417 /* reset the hardware with the new settings */
2418 pch_gbe_reset(adapter);
2419
2420 ret = register_netdev(netdev);
2421 if (ret)
2422 goto err_free_adapter;
2423 /* tell the stack to leave us alone until pch_gbe_open() is called */
2424 netif_carrier_off(netdev);
2425 netif_stop_queue(netdev);
2426
2427 dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
2428
2429 device_set_wakeup_enable(&pdev->dev, 1);
2430 return 0;
2431
2432err_free_adapter:
2433 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2434 kfree(adapter->tx_ring);
2435 kfree(adapter->rx_ring);
2436err_iounmap:
2437 iounmap(adapter->hw.reg);
2438err_free_netdev:
2439 free_netdev(netdev);
2440err_release_pci:
2441 pci_release_regions(pdev);
2442err_disable_device:
2443 pci_disable_device(pdev);
2444 return ret;
2445}
2446
/* PCI IDs this driver binds to: the Intel EG20T (IOH1) GbE function
 * and the ROHM ML7223 GbE, both matched by Ethernet class code.
 */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	{.vendor = PCI_VENDOR_ID_ROHM,
	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};
2465
#ifdef CONFIG_PM
/* All system-sleep transitions funnel through the same suspend/resume
 * pair; see __pch_gbe_suspend() and pch_gbe_resume().
 */
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
#endif
2476
/* PCI AER recovery callbacks (see the pch_gbe_io_* functions above) */
static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};
2482
/* PCI driver glue tying together probe/remove, power management and
 * error recovery for this module.
 */
static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
#ifdef CONFIG_PM
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
2494
2495
2496static int __init pch_gbe_init_module(void)
2497{
2498 int ret;
2499
f7594d42 2500 ret = pci_register_driver(&pch_gbe_driver);
77555ee7
MO
2501 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2502 if (copybreak == 0) {
2503 pr_info("copybreak disabled\n");
2504 } else {
2505 pr_info("copybreak enabled for packets <= %u bytes\n",
2506 copybreak);
2507 }
2508 }
2509 return ret;
2510}
2511
/* Module exit point: unregister the PCI driver (triggers remove()) */
static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}
2516
2517module_init(pch_gbe_init_module);
2518module_exit(pch_gbe_exit_module);
2519
a1dcfcb7
TO
2520MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2521MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
77555ee7
MO
2522MODULE_LICENSE("GPL");
2523MODULE_VERSION(DRV_VERSION);
2524MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2525
2526module_param(copybreak, uint, 0644);
2527MODULE_PARM_DESC(copybreak,
2528 "Maximum size of packet that is copied to a new buffer on receive");
2529
2530/* pch_gbe_main.c */
This page took 0.212079 seconds and 5 git commands to generate.