be2net: fix truesize errors
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
6b7c5b94 19#include "be.h"
8788fdc2 20#include "be_cmds.h"
65f71b8b 21#include <asm/div64.h>
6b7c5b94
SP
22
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
2e588f84 29static ushort rx_frag_size = 2048;
ba343c77 30static unsigned int num_vfs;
2e588f84 31module_param(rx_frag_size, ushort, S_IRUGO);
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
6b7c5b94 33MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
ba343c77 34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 35
6b7c5b94 36static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
6b7c5b94
SP
43 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one description string per bit, LSB first.
 * Trailing spaces in some entries are intentional (original HW doc names).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one description string per bit, LSB first. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94
SP
116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
2b7bcebf
IV
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
6b7c5b94
SP
123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
2b7bcebf
IV
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
6b7c5b94
SP
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
8788fdc2 142static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 143{
db3ea781 144 u32 reg, enabled;
5f0b849e 145
cf588477
SP
146 if (adapter->eeh_err)
147 return;
148
db3ea781
SP
149 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
150 &reg);
151 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
5f0b849e 153 if (!enabled && enable)
6b7c5b94 154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 155 else if (enabled && !enable)
6b7c5b94 156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 157 else
6b7c5b94 158 return;
5f0b849e 159
db3ea781
SP
160 pci_write_config_dword(adapter->pdev,
161 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
162}
163
8788fdc2 164static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
165{
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
169
170 wmb();
8788fdc2 171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
172}
173
8788fdc2 174static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
175{
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
179
180 wmb();
8788fdc2 181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
182}
183
8788fdc2 184static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
185 bool arm, bool clear_int, u16 num_popped)
186{
187 u32 val = 0;
188 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
189 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
190 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
191
192 if (adapter->eeh_err)
193 return;
194
6b7c5b94
SP
195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
202}
203
8788fdc2 204void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
205{
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
208 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
209 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
210
211 if (adapter->eeh_err)
212 return;
213
6b7c5b94
SP
214 if (arm)
215 val |= 1 << DB_CQ_REARM_SHIFT;
216 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 217 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
218}
219
6b7c5b94
SP
220static int be_mac_addr_set(struct net_device *netdev, void *p)
221{
222 struct be_adapter *adapter = netdev_priv(netdev);
223 struct sockaddr *addr = p;
224 int status = 0;
225
ca9e4988
AK
226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
228
ba343c77
SB
229 /* MAC addr configuration will be done in hardware for VFs
230 * by their corresponding PFs. Just copy to netdev addr here
231 */
232 if (!be_physfn(adapter))
233 goto netdev_addr;
234
f8617e08
AK
235 status = be_cmd_pmac_del(adapter, adapter->if_handle,
236 adapter->pmac_id, 0);
a65027e4
SP
237 if (status)
238 return status;
6b7c5b94 239
a65027e4 240 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
f8617e08 241 adapter->if_handle, &adapter->pmac_id, 0);
ba343c77 242netdev_addr:
6b7c5b94
SP
243 if (!status)
244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
245
246 return status;
247}
248
89a88ab8
AK
249static void populate_be2_stats(struct be_adapter *adapter)
250{
ac124ff9
SP
251 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
252 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
253 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 254 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
255 &rxf_stats->port[adapter->port_num];
256 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 257
ac124ff9 258 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
259 drvs->rx_pause_frames = port_stats->rx_pause_frames;
260 drvs->rx_crc_errors = port_stats->rx_crc_errors;
261 drvs->rx_control_frames = port_stats->rx_control_frames;
262 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
263 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
264 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
265 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
266 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
267 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
268 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
269 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
270 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
271 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
272 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 273 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
ac124ff9 276 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
89a88ab8
AK
277 drvs->rx_alignment_symbol_errors =
278 port_stats->rx_alignment_symbol_errors;
279
280 drvs->tx_pauseframes = port_stats->tx_pauseframes;
281 drvs->tx_controlframes = port_stats->tx_controlframes;
282
283 if (adapter->port_num)
ac124ff9 284 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 285 else
ac124ff9 286 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8
AK
287 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
288 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
289 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
290 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
291 drvs->forwarded_packets = rxf_stats->forwarded_packets;
292 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
293 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
294 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
295 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
296}
297
298static void populate_be3_stats(struct be_adapter *adapter)
299{
ac124ff9
SP
300 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
301 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
302 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 303 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
304 &rxf_stats->port[adapter->port_num];
305 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 306
ac124ff9 307 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
308 drvs->rx_pause_frames = port_stats->rx_pause_frames;
309 drvs->rx_crc_errors = port_stats->rx_crc_errors;
310 drvs->rx_control_frames = port_stats->rx_control_frames;
311 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
312 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
313 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
314 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
315 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
316 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
317 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
318 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
319 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
320 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
321 drvs->rx_dropped_header_too_small =
322 port_stats->rx_dropped_header_too_small;
323 drvs->rx_input_fifo_overflow_drop =
324 port_stats->rx_input_fifo_overflow_drop;
ac124ff9 325 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
89a88ab8
AK
326 drvs->rx_alignment_symbol_errors =
327 port_stats->rx_alignment_symbol_errors;
ac124ff9 328 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
329 drvs->tx_pauseframes = port_stats->tx_pauseframes;
330 drvs->tx_controlframes = port_stats->tx_controlframes;
331 drvs->jabber_events = port_stats->jabber_events;
332 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
333 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
334 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
335 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
336 drvs->forwarded_packets = rxf_stats->forwarded_packets;
337 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
338 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
339 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
340 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
341}
342
005d5696
SX
343static void populate_lancer_stats(struct be_adapter *adapter)
344{
89a88ab8 345
005d5696 346 struct be_drv_stats *drvs = &adapter->drv_stats;
ac124ff9
SP
347 struct lancer_pport_stats *pport_stats =
348 pport_stats_from_cmd(adapter);
349
350 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
351 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
352 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
353 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 354 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 355 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
356 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
357 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
358 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
359 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
360 drvs->rx_dropped_tcp_length =
361 pport_stats->rx_dropped_invalid_tcp_length;
362 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
363 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
364 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
365 drvs->rx_dropped_header_too_small =
366 pport_stats->rx_dropped_header_too_small;
367 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
ac124ff9 369 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 370 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
371 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
372 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 373 drvs->jabber_events = pport_stats->rx_jabbers;
005d5696 374 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
ac124ff9
SP
375 drvs->forwarded_packets = pport_stats->num_forwards_lo;
376 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 377 drvs->rx_drops_too_many_frags =
ac124ff9 378 pport_stats->rx_drops_too_many_frags_lo;
005d5696 379}
89a88ab8 380
09c1c68f
SP
381static void accumulate_16bit_val(u32 *acc, u16 val)
382{
383#define lo(x) (x & 0xFFFF)
384#define hi(x) (x & 0xFFFF0000)
385 bool wrapped = val < lo(*acc);
386 u32 newacc = hi(*acc) + val;
387
388 if (wrapped)
389 newacc += 65536;
390 ACCESS_ONCE(*acc) = newacc;
391}
392
89a88ab8
AK
393void be_parse_stats(struct be_adapter *adapter)
394{
ac124ff9
SP
395 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
396 struct be_rx_obj *rxo;
397 int i;
398
005d5696
SX
399 if (adapter->generation == BE_GEN3) {
400 if (lancer_chip(adapter))
401 populate_lancer_stats(adapter);
402 else
403 populate_be3_stats(adapter);
404 } else {
89a88ab8 405 populate_be2_stats(adapter);
005d5696 406 }
ac124ff9
SP
407
408 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
09c1c68f
SP
409 for_all_rx_queues(adapter, rxo, i) {
410 /* below erx HW counter can actually wrap around after
411 * 65535. Driver accumulates a 32-bit value
412 */
413 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
414 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
415 }
89a88ab8
AK
416}
417
ab1594e9
SP
418static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
419 struct rtnl_link_stats64 *stats)
6b7c5b94 420{
ab1594e9 421 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 422 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 423 struct be_rx_obj *rxo;
3c8def97 424 struct be_tx_obj *txo;
ab1594e9
SP
425 u64 pkts, bytes;
426 unsigned int start;
3abcdeda 427 int i;
6b7c5b94 428
3abcdeda 429 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
430 const struct be_rx_stats *rx_stats = rx_stats(rxo);
431 do {
432 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
433 pkts = rx_stats(rxo)->rx_pkts;
434 bytes = rx_stats(rxo)->rx_bytes;
435 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
436 stats->rx_packets += pkts;
437 stats->rx_bytes += bytes;
438 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
439 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
440 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
441 }
442
3c8def97 443 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
444 const struct be_tx_stats *tx_stats = tx_stats(txo);
445 do {
446 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
447 pkts = tx_stats(txo)->tx_pkts;
448 bytes = tx_stats(txo)->tx_bytes;
449 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
450 stats->tx_packets += pkts;
451 stats->tx_bytes += bytes;
3c8def97 452 }
6b7c5b94
SP
453
454 /* bad pkts received */
ab1594e9 455 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
456 drvs->rx_alignment_symbol_errors +
457 drvs->rx_in_range_errors +
458 drvs->rx_out_range_errors +
459 drvs->rx_frame_too_long +
460 drvs->rx_dropped_too_small +
461 drvs->rx_dropped_too_short +
462 drvs->rx_dropped_header_too_small +
463 drvs->rx_dropped_tcp_length +
ab1594e9 464 drvs->rx_dropped_runt;
68110868 465
6b7c5b94 466 /* detailed rx errors */
ab1594e9 467 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
468 drvs->rx_out_range_errors +
469 drvs->rx_frame_too_long;
68110868 470
ab1594e9 471 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
472
473 /* frame alignment errors */
ab1594e9 474 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 475
6b7c5b94
SP
476 /* receiver fifo overrun */
477 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 478 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
479 drvs->rx_input_fifo_overflow_drop +
480 drvs->rx_drops_no_pbuf;
ab1594e9 481 return stats;
6b7c5b94
SP
482}
483
ea172a01 484void be_link_status_update(struct be_adapter *adapter, u32 link_status)
6b7c5b94 485{
6b7c5b94
SP
486 struct net_device *netdev = adapter->netdev;
487
ea172a01
SP
488 /* when link status changes, link speed must be re-queried from card */
489 adapter->link_speed = -1;
490 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
491 netif_carrier_on(netdev);
492 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
493 } else {
494 netif_carrier_off(netdev);
495 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
6b7c5b94 496 }
6b7c5b94
SP
497}
498
3c8def97 499static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 500 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 501{
3c8def97
SP
502 struct be_tx_stats *stats = tx_stats(txo);
503
ab1594e9 504 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
505 stats->tx_reqs++;
506 stats->tx_wrbs += wrb_cnt;
507 stats->tx_bytes += copied;
508 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 509 if (stopped)
ac124ff9 510 stats->tx_stops++;
ab1594e9 511 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
512}
513
514/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
515static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
516 bool *dummy)
6b7c5b94 517{
ebc8d2ab
DM
518 int cnt = (skb->len > skb->data_len);
519
520 cnt += skb_shinfo(skb)->nr_frags;
521
6b7c5b94
SP
522 /* to account for hdr wrb */
523 cnt++;
fe6d2a38
SP
524 if (lancer_chip(adapter) || !(cnt & 1)) {
525 *dummy = false;
526 } else {
6b7c5b94
SP
527 /* add a dummy to make it an even num */
528 cnt++;
529 *dummy = true;
fe6d2a38 530 }
6b7c5b94
SP
531 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
532 return cnt;
533}
534
535static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
536{
537 wrb->frag_pa_hi = upper_32_bits(addr);
538 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
539 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
540}
541
cc4ce020
SK
542static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
543 struct sk_buff *skb, u32 wrb_cnt, u32 len)
6b7c5b94 544{
cc4ce020
SK
545 u8 vlan_prio = 0;
546 u16 vlan_tag = 0;
547
6b7c5b94
SP
548 memset(hdr, 0, sizeof(*hdr));
549
550 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
551
49e4b847 552 if (skb_is_gso(skb)) {
6b7c5b94
SP
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
554 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
555 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 556 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
fe6d2a38
SP
558 if (lancer_chip(adapter) && adapter->sli_family ==
559 LANCER_A0_SLI_FAMILY) {
560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
561 if (is_tcp_pkt(skb))
562 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
563 tcpcs, hdr, 1);
564 else if (is_udp_pkt(skb))
565 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
566 udpcs, hdr, 1);
567 }
6b7c5b94
SP
568 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
569 if (is_tcp_pkt(skb))
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
571 else if (is_udp_pkt(skb))
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
573 }
574
4c5102f9 575 if (vlan_tx_tag_present(skb)) {
6b7c5b94 576 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
cc4ce020
SK
577 vlan_tag = vlan_tx_tag_get(skb);
578 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
579 /* If vlan priority provided by OS is NOT in available bmap */
580 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
581 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
582 adapter->recommended_prio;
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
584 }
585
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
588 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
590}
591
2b7bcebf 592static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
593 bool unmap_single)
594{
595 dma_addr_t dma;
596
597 be_dws_le_to_cpu(wrb, sizeof(*wrb));
598
599 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 600 if (wrb->frag_len) {
7101e111 601 if (unmap_single)
2b7bcebf
IV
602 dma_unmap_single(dev, dma, wrb->frag_len,
603 DMA_TO_DEVICE);
7101e111 604 else
2b7bcebf 605 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
606 }
607}
6b7c5b94 608
3c8def97 609static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
6b7c5b94
SP
610 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
611{
7101e111
SP
612 dma_addr_t busaddr;
613 int i, copied = 0;
2b7bcebf 614 struct device *dev = &adapter->pdev->dev;
6b7c5b94 615 struct sk_buff *first_skb = skb;
6b7c5b94
SP
616 struct be_eth_wrb *wrb;
617 struct be_eth_hdr_wrb *hdr;
7101e111
SP
618 bool map_single = false;
619 u16 map_head;
6b7c5b94 620
6b7c5b94
SP
621 hdr = queue_head_node(txq);
622 queue_head_inc(txq);
7101e111 623 map_head = txq->head;
6b7c5b94 624
ebc8d2ab 625 if (skb->len > skb->data_len) {
e743d313 626 int len = skb_headlen(skb);
2b7bcebf
IV
627 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
628 if (dma_mapping_error(dev, busaddr))
7101e111
SP
629 goto dma_err;
630 map_single = true;
ebc8d2ab
DM
631 wrb = queue_head_node(txq);
632 wrb_fill(wrb, busaddr, len);
633 be_dws_cpu_to_le(wrb, sizeof(*wrb));
634 queue_head_inc(txq);
635 copied += len;
636 }
6b7c5b94 637
ebc8d2ab
DM
638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
639 struct skb_frag_struct *frag =
640 &skb_shinfo(skb)->frags[i];
b061b39e
IC
641 busaddr = skb_frag_dma_map(dev, frag, 0,
642 frag->size, DMA_TO_DEVICE);
2b7bcebf 643 if (dma_mapping_error(dev, busaddr))
7101e111 644 goto dma_err;
ebc8d2ab
DM
645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, frag->size);
647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq);
649 copied += frag->size;
6b7c5b94
SP
650 }
651
652 if (dummy_wrb) {
653 wrb = queue_head_node(txq);
654 wrb_fill(wrb, 0, 0);
655 be_dws_cpu_to_le(wrb, sizeof(*wrb));
656 queue_head_inc(txq);
657 }
658
cc4ce020 659 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
6b7c5b94
SP
660 be_dws_cpu_to_le(hdr, sizeof(*hdr));
661
662 return copied;
7101e111
SP
663dma_err:
664 txq->head = map_head;
665 while (copied) {
666 wrb = queue_head_node(txq);
2b7bcebf 667 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
668 map_single = false;
669 copied -= wrb->frag_len;
670 queue_head_inc(txq);
671 }
672 return 0;
6b7c5b94
SP
673}
674
61357325 675static netdev_tx_t be_xmit(struct sk_buff *skb,
b31c50a7 676 struct net_device *netdev)
6b7c5b94
SP
677{
678 struct be_adapter *adapter = netdev_priv(netdev);
3c8def97
SP
679 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
680 struct be_queue_info *txq = &txo->q;
6b7c5b94
SP
681 u32 wrb_cnt = 0, copied = 0;
682 u32 start = txq->head;
683 bool dummy_wrb, stopped = false;
684
fe6d2a38 685 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 686
3c8def97 687 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
c190e3c8
AK
688 if (copied) {
689 /* record the sent skb in the sent_skb table */
3c8def97
SP
690 BUG_ON(txo->sent_skb_list[start]);
691 txo->sent_skb_list[start] = skb;
c190e3c8
AK
692
693 /* Ensure txq has space for the next skb; Else stop the queue
694 * *BEFORE* ringing the tx doorbell, so that we serialze the
695 * tx compls of the current transmit which'll wake up the queue
696 */
7101e111 697 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
698 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
699 txq->len) {
3c8def97 700 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
701 stopped = true;
702 }
6b7c5b94 703
c190e3c8 704 be_txq_notify(adapter, txq->id, wrb_cnt);
6b7c5b94 705
3c8def97 706 be_tx_stats_update(txo, wrb_cnt, copied,
91992e44 707 skb_shinfo(skb)->gso_segs, stopped);
c190e3c8
AK
708 } else {
709 txq->head = start;
710 dev_kfree_skb_any(skb);
6b7c5b94 711 }
6b7c5b94
SP
712 return NETDEV_TX_OK;
713}
714
715static int be_change_mtu(struct net_device *netdev, int new_mtu)
716{
717 struct be_adapter *adapter = netdev_priv(netdev);
718 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
719 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
720 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
721 dev_info(&adapter->pdev->dev,
722 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
723 BE_MIN_MTU,
724 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
725 return -EINVAL;
726 }
727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
728 netdev->mtu, new_mtu);
729 netdev->mtu = new_mtu;
730 return 0;
731}
732
733/*
82903e4b
AK
734 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
735 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 736 */
1da87b7f 737static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
6b7c5b94 738{
6b7c5b94
SP
739 u16 vtag[BE_NUM_VLANS_SUPPORTED];
740 u16 ntags = 0, i;
82903e4b 741 int status = 0;
1da87b7f
AK
742 u32 if_handle;
743
744 if (vf) {
745 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
746 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
747 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
748 }
6b7c5b94 749
c0e64ef4
SP
750 /* No need to further configure vids if in promiscuous mode */
751 if (adapter->promiscuous)
752 return 0;
753
82903e4b 754 if (adapter->vlans_added <= adapter->max_vlans) {
6b7c5b94 755 /* Construct VLAN Table to give to HW */
b738127d 756 for (i = 0; i < VLAN_N_VID; i++) {
6b7c5b94
SP
757 if (adapter->vlan_tag[i]) {
758 vtag[ntags] = cpu_to_le16(i);
759 ntags++;
760 }
761 }
b31c50a7
SP
762 status = be_cmd_vlan_config(adapter, adapter->if_handle,
763 vtag, ntags, 1, 0);
6b7c5b94 764 } else {
b31c50a7
SP
765 status = be_cmd_vlan_config(adapter, adapter->if_handle,
766 NULL, 0, 1, 1);
6b7c5b94 767 }
1da87b7f 768
b31c50a7 769 return status;
6b7c5b94
SP
770}
771
6b7c5b94
SP
772static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
773{
774 struct be_adapter *adapter = netdev_priv(netdev);
775
1da87b7f 776 adapter->vlans_added++;
ba343c77
SB
777 if (!be_physfn(adapter))
778 return;
779
6b7c5b94 780 adapter->vlan_tag[vid] = 1;
82903e4b 781 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1da87b7f 782 be_vid_config(adapter, false, 0);
6b7c5b94
SP
783}
784
785static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
786{
787 struct be_adapter *adapter = netdev_priv(netdev);
788
1da87b7f 789 adapter->vlans_added--;
1da87b7f 790
ba343c77
SB
791 if (!be_physfn(adapter))
792 return;
793
6b7c5b94 794 adapter->vlan_tag[vid] = 0;
82903e4b 795 if (adapter->vlans_added <= adapter->max_vlans)
1da87b7f 796 be_vid_config(adapter, false, 0);
6b7c5b94
SP
797}
798
24307eef 799static void be_set_multicast_list(struct net_device *netdev)
6b7c5b94
SP
800{
801 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 802
24307eef 803 if (netdev->flags & IFF_PROMISC) {
5b8821b7 804 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
805 adapter->promiscuous = true;
806 goto done;
6b7c5b94
SP
807 }
808
25985edc 809 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
810 if (adapter->promiscuous) {
811 adapter->promiscuous = false;
5b8821b7 812 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
813
814 if (adapter->vlans_added)
815 be_vid_config(adapter, false, 0);
6b7c5b94
SP
816 }
817
e7b909a6 818 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 819 if (netdev->flags & IFF_ALLMULTI ||
5b8821b7
SP
820 netdev_mc_count(netdev) > BE_MAX_MC) {
821 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 822 goto done;
6b7c5b94 823 }
6b7c5b94 824
5b8821b7 825 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
24307eef
SP
826done:
827 return;
6b7c5b94
SP
828}
829
ba343c77
SB
830static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
831{
832 struct be_adapter *adapter = netdev_priv(netdev);
833 int status;
834
835 if (!adapter->sriov_enabled)
836 return -EPERM;
837
838 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
839 return -EINVAL;
840
64600ea5
AK
841 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
842 status = be_cmd_pmac_del(adapter,
843 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 844 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
ba343c77 845
64600ea5
AK
846 status = be_cmd_pmac_add(adapter, mac,
847 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 848 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
64600ea5
AK
849
850 if (status)
ba343c77
SB
851 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
852 mac, vf);
64600ea5
AK
853 else
854 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
855
ba343c77
SB
856 return status;
857}
858
64600ea5
AK
859static int be_get_vf_config(struct net_device *netdev, int vf,
860 struct ifla_vf_info *vi)
861{
862 struct be_adapter *adapter = netdev_priv(netdev);
863
864 if (!adapter->sriov_enabled)
865 return -EPERM;
866
867 if (vf >= num_vfs)
868 return -EINVAL;
869
870 vi->vf = vf;
e1d18735 871 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
1da87b7f 872 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
64600ea5
AK
873 vi->qos = 0;
874 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
875
876 return 0;
877}
878
1da87b7f
AK
879static int be_set_vf_vlan(struct net_device *netdev,
880 int vf, u16 vlan, u8 qos)
881{
882 struct be_adapter *adapter = netdev_priv(netdev);
883 int status = 0;
884
885 if (!adapter->sriov_enabled)
886 return -EPERM;
887
888 if ((vf >= num_vfs) || (vlan > 4095))
889 return -EINVAL;
890
891 if (vlan) {
892 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
893 adapter->vlans_added++;
894 } else {
895 adapter->vf_cfg[vf].vf_vlan_tag = 0;
896 adapter->vlans_added--;
897 }
898
899 status = be_vid_config(adapter, true, vf);
900
901 if (status)
902 dev_info(&adapter->pdev->dev,
903 "VLAN %d config on VF %d failed\n", vlan, vf);
904 return status;
905}
906
e1d18735
AK
/* ndo_set_vf_tx_rate handler: cap VF @vf's TX rate to @rate Mbps.
 * Rates above 10000 Mbps (10 Gbps link max) are silently clamped.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	/* FW takes the rate in units of 10 Mbps; VF domains are 1-based */
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
930
/* Adaptive interrupt coalescing: once a second, derive a new event-queue
 * delay (EQD) for @rxo's EQ from the observed RX packet rate and program
 * it into the hardware if it changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently on 32-bit SMP */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an EQD value, then clamp to the EQ's legal range;
	 * very low rates (eqd < 10) disable coalescing entirely.
	 */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	/* Only issue the (slow) FW command when the value actually changes */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
974
/* Fold one parsed RX completion @rxcp into @rxo's per-queue counters,
 * inside a u64_stats critical section so 32-bit readers see consistent
 * 64-bit values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
990
2e588f84 991static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 992{
19fad86f
PR
993 /* L4 checksum is not reliable for non TCP/UDP packets.
994 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
995 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
996 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
997}
998
/* Consume the RX page-info slot at @frag_idx. If this fragment is the
 * last user of its backing big page, the page is DMA-unmapped here.
 * Decrements the count of posted-but-unconsumed RX descriptors.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap only once per big page: the last fragment owns the mapping */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1020
/* Throwaway the data in the Rx completion: release the page reference of
 * every fragment belonging to @rxcp without building an skb.
 */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		/* rxq_idx advances (with wrap) through the frags of this compl */
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1037
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area; the
 * remainder stays in page fragments. skb->truesize is charged one full
 * rx_frag_size per fragment consumed, since that is what each fragment
 * actually occupies regardless of how many bytes the packet used.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header went to the linear area; the rest of this fragment
		 * is attached as frag[0].
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous slot: drop the extra ref
			 * taken when the fragment was posted.
			 */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1114
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received fragments, set checksum/hash/VLAN
 * metadata, and hand the packet to the stack.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		/* No memory for an skb: drop the frame, free its frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Trust the HW checksum only when the completion says it is valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1147
5be93b9a
AK
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * build a frag-only skb via napi_get_frags() and feed it to the GRO
 * engine. truesize is charged one rx_frag_size per fragment consumed.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* No skb available: drop the frame, releasing its pages */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same page as previous slot: drop the posting ref */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for validated TCP frames (see do_gro) */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1203
1204static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1205 struct be_eth_rx_compl *compl,
1206 struct be_rx_compl_info *rxcp)
1207{
1208 rxcp->pkt_size =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1210 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1211 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1212 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1213 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1214 rxcp->ip_csum =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1216 rxcp->l4_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1218 rxcp->ipv6 =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1220 rxcp->rxq_idx =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1222 rxcp->num_rcvd =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1224 rxcp->pkt_type =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914
AK
1226 rxcp->rss_hash =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
15d72184
SP
1228 if (rxcp->vlanf) {
1229 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1230 compl);
1231 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1232 compl);
15d72184 1233 }
12004ae9 1234 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1235}
1236
1237static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1238 struct be_eth_rx_compl *compl,
1239 struct be_rx_compl_info *rxcp)
1240{
1241 rxcp->pkt_size =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1243 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1244 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1245 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1246 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1247 rxcp->ip_csum =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1249 rxcp->l4_csum =
1250 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1251 rxcp->ipv6 =
1252 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1253 rxcp->rxq_idx =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1255 rxcp->num_rcvd =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1257 rxcp->pkt_type =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914
AK
1259 rxcp->rss_hash =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
15d72184
SP
1261 if (rxcp->vlanf) {
1262 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1263 compl);
1264 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1265 compl);
15d72184 1266 }
12004ae9 1267 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1268}
1269
/* Fetch and parse the next valid RX completion from @rxo's CQ, or return
 * NULL if none is pending. The completion is consumed: its valid bit is
 * cleared and the CQ tail advanced.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the compl */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		/* 0x400 appears to be a function-mode flag bit; presumably
		 * "VLAN transparent/promiscuous" — confirm against be_hw.h */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the tag when it is the port's pvid and the host
		 * has not configured that VLAN */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1309
1829b086 1310static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1311{
6b7c5b94 1312 u32 order = get_order(size);
1829b086 1313
6b7c5b94 1314 if (order > 0)
1829b086
ED
1315 gfp |= __GFP_COMP;
1316 return alloc_pages(gfp, order);
6b7c5b94
SP
1317}
1318
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each big page is DMA-mapped once; the fragment that fills the page is
 * flagged last_page_user so the mapping is torn down exactly once on the
 * consume side (get_rx_page_info).
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for the device */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Next fragment of the current page: take a ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the fragment's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop ended with the page only partially carved up: the last posted
	 * fragment still owns the DMA mapping */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1381
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending. The entry is consumed: valid bit cleared, tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the compl */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1397
3c8def97
SP
/* Reclaim one transmitted skb: unmap every WRB of the packet ending at
 * @last_index (inclusive) and free the skb. Returns the number of WRBs
 * consumed, including the header WRB; the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data WRB also covers the linear header mapping */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1429
859b1e4e
SP
/* Pop the next pending entry off @eq_obj's event queue, or NULL if the
 * queue is empty. The returned entry's evt word is byte-swapped to CPU
 * order; the caller is expected to zero it after use.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	/* A zero evt word means no event is posted at the tail */
	if (!eqe->evt)
		return NULL;

	/* Order the evt read before consuming the entry's contents */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1442
/* Drain all pending entries from @eq_obj, acknowledge them to the HW,
 * and schedule the EQ's NAPI context when any work was found.
 * Returns the number of events consumed.
 */
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;	/* mark slot free for the HW to reuse */
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
1467
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;	/* discard the event */
		num++;
	}

	/* Ack what we consumed so the HW's EQ accounting stays correct */
	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
1484
/* Drain @rxo completely: discard every pending RX completion, then free
 * all posted-but-unconsumed RX buffers, leaving the queue empty with
 * head == tail == 0.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest posted-but-unconsumed descriptor sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info() also decrements rxq->used */
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1509
3c8def97
SP
/* Quiesce @txo on teardown: poll up to ~200ms for outstanding TX
 * completions, then forcibly reclaim any skbs whose completions will
 * never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch and release the reclaimed WRB slots */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Derive the packet's last WRB index from its skb layout */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1557
5fb379ee
SP
1558static void be_mcc_queues_destroy(struct be_adapter *adapter)
1559{
1560 struct be_queue_info *q;
5fb379ee 1561
8788fdc2 1562 q = &adapter->mcc_obj.q;
5fb379ee 1563 if (q->created)
8788fdc2 1564 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1565 be_queue_free(adapter, q);
1566
8788fdc2 1567 q = &adapter->mcc_obj.cq;
5fb379ee 1568 if (q->created)
8788fdc2 1569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1570 be_queue_free(adapter, q);
1571}
1572
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and the MCC queue on top of it,
 * unwinding partial state via the goto chain on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in strict reverse order of the steps above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1608
6b7c5b94
SP
/* Tear down every TX queue/CQ pair and finally the shared TX event
 * queue, draining residual events before the EQ is destroyed.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		/* TXQ before its CQ: reverse of creation order */
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1635
/* One TX event queue is shared by all TX compl qs.
 * Creates the shared TX EQ (adaptive coalescing disabled, fixed EQD),
 * then a CQ + TXQ pair per TX object. On any failure everything created
 * so far is torn down. Returns 0 on success, -1 on failure.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	/* Fixed EQ delay of 96 for TX; AIC is only used on the RX side */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
1680
/* Free the RX queue, CQ and EQ of every RX object. The RX queue itself
 * is only freed here (no be_cmd_q_destroy) — it is created in be_open()
 * and presumably destroyed on the close path; confirm against be_close().
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
1701
ac6a0c4a
SP
1702static u32 be_num_rxqs_want(struct be_adapter *adapter)
1703{
c814fd36 1704 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
ac6a0c4a
SP
1705 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1706 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1707 } else {
1708 dev_warn(&adapter->pdev->dev,
1709 "No support for multiple RX queues\n");
1710 return 1;
1711 }
1712}
1713
6b7c5b94
SP
/* Create per-RX-object EQ and CQ, and allocate (but do not yet create in
 * HW) the RX queue itself. The number of RX queues is bounded by both
 * the RSS capability and the available MSI-X vectors.
 * Returns 0 on success, -1 on failure (with full teardown).
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* One MSI-X vector is reserved for TX/MCC; the rest can serve RX */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;	/* RX uses adaptive coalescing */

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
6b7c5b94 1771
fe6d2a38 1772static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1773{
fe6d2a38
SP
1774 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1775 if (!eqe->evt)
1776 return false;
1777 else
1778 return true;
b628bde2
SP
1779}
1780
6b7c5b94
SP
/* Legacy (INTx) interrupt handler. Lancer chips have no ISR register, so
 * pending work is detected by peeking at the event queues; BEx chips read
 * the CEV ISR and dispatch per set EQ bit. Returns IRQ_NONE when the
 * interrupt was not ours (shared line).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		/* No events on any EQ: the interrupt was not for us */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* Each CEV_ISR register covers 8 EQs; pick ours by EQ id */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1815
1816static irqreturn_t be_msix_rx(int irq, void *dev)
1817{
3abcdeda
SP
1818 struct be_rx_obj *rxo = dev;
1819 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1820
3c8def97 1821 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1822
1823 return IRQ_HANDLED;
1824}
1825
/* MSI-X handler for the shared TX/MCC event queue: drain events and kick
 * the TX/MCC NAPI context.
 */
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}
1834
2e588f84 1835static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1836{
2e588f84 1837 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1838}
1839
/* NAPI poll handler for one RX queue: consume up to @budget completions,
 * dispatch each via the GRO or regular path, refill the RX ring when it
 * runs low, and re-arm interrupts only when the budget was not exhausted.
 * Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Ack the completions and re-arm the CQ interrupt */
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1896
f31e50a8
SP
1897/* As TX and MCC share the same EQ check for both TX and MCC completions.
1898 * For TX/MCC we don't honour budget; consume everything
1899 */
1900static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1901{
f31e50a8
SP
1902 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1903 struct be_adapter *adapter =
1904 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1905 struct be_tx_obj *txo;
6b7c5b94 1906 struct be_eth_tx_compl *txcp;
3c8def97
SP
1907 int tx_compl, mcc_compl, status = 0;
1908 u8 i;
1909 u16 num_wrbs;
1910
1911 for_all_tx_queues(adapter, txo, i) {
1912 tx_compl = 0;
1913 num_wrbs = 0;
1914 while ((txcp = be_tx_compl_get(&txo->cq))) {
1915 num_wrbs += be_tx_compl_process(adapter, txo,
1916 AMAP_GET_BITS(struct amap_eth_tx_compl,
1917 wrb_index, txcp));
1918 tx_compl++;
1919 }
1920 if (tx_compl) {
1921 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1922
1923 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1924
3c8def97
SP
1925 /* As Tx wrbs have been freed up, wake up netdev queue
1926 * if it was stopped due to lack of tx wrbs. */
1927 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1928 atomic_read(&txo->q.used) < txo->q.len / 2) {
1929 netif_wake_subqueue(adapter->netdev, i);
1930 }
1931
ab1594e9 1932 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 1933 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 1934 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 1935 }
6b7c5b94
SP
1936 }
1937
f31e50a8
SP
1938 mcc_compl = be_process_mcc(adapter, &status);
1939
f31e50a8
SP
1940 if (mcc_compl) {
1941 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1942 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1943 }
1944
3c8def97 1945 napi_complete(napi);
6b7c5b94 1946
3c8def97 1947 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 1948 adapter->drv_stats.tx_events++;
6b7c5b94
SP
1949 return 1;
1950}
1951
d053de91 1952void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1953{
1954 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1955 u32 i;
1956
1957 pci_read_config_dword(adapter->pdev,
1958 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1959 pci_read_config_dword(adapter->pdev,
1960 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1961 pci_read_config_dword(adapter->pdev,
1962 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1963 pci_read_config_dword(adapter->pdev,
1964 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1965
1966 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1967 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1968
d053de91
AK
1969 if (ue_status_lo || ue_status_hi) {
1970 adapter->ue_detected = true;
7acc2087 1971 adapter->eeh_err = true;
d053de91
AK
1972 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1973 }
1974
7c185276
AK
1975 if (ue_status_lo) {
1976 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1977 if (ue_status_lo & 1)
1978 dev_err(&adapter->pdev->dev,
1979 "UE: %s bit set\n", ue_status_low_desc[i]);
1980 }
1981 }
1982 if (ue_status_hi) {
1983 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1984 if (ue_status_hi & 1)
1985 dev_err(&adapter->pdev->dev,
1986 "UE: %s bit set\n", ue_status_hi_desc[i]);
1987 }
1988 }
1989
1990}
1991
ea1dae11
SP
1992static void be_worker(struct work_struct *work)
1993{
1994 struct be_adapter *adapter =
1995 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
1996 struct be_rx_obj *rxo;
1997 int i;
ea1dae11 1998
16da8250
SP
1999 if (!adapter->ue_detected && !lancer_chip(adapter))
2000 be_detect_dump_ue(adapter);
2001
f203af70
SK
2002 /* when interrupts are not yet enabled, just reap any pending
2003 * mcc completions */
2004 if (!netif_running(adapter->netdev)) {
2005 int mcc_compl, status = 0;
2006
2007 mcc_compl = be_process_mcc(adapter, &status);
2008
2009 if (mcc_compl) {
2010 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2011 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2012 }
9b037f38 2013
f203af70
SK
2014 goto reschedule;
2015 }
2016
005d5696
SX
2017 if (!adapter->stats_cmd_sent) {
2018 if (lancer_chip(adapter))
2019 lancer_cmd_get_pport_stats(adapter,
2020 &adapter->stats_cmd);
2021 else
2022 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2023 }
3c8def97 2024
3abcdeda 2025 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2026 be_rx_eqd_update(adapter, rxo);
2027
2028 if (rxo->rx_post_starved) {
2029 rxo->rx_post_starved = false;
1829b086 2030 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 2031 }
ea1dae11
SP
2032 }
2033
f203af70 2034reschedule:
e74fbd03 2035 adapter->work_counter++;
ea1dae11
SP
2036 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2037}
2038
8d56ff11
SP
2039static void be_msix_disable(struct be_adapter *adapter)
2040{
ac6a0c4a 2041 if (msix_enabled(adapter)) {
8d56ff11 2042 pci_disable_msix(adapter->pdev);
ac6a0c4a 2043 adapter->num_msix_vec = 0;
3abcdeda
SP
2044 }
2045}
2046
6b7c5b94
SP
2047static void be_msix_enable(struct be_adapter *adapter)
2048{
3abcdeda 2049#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2050 int i, status, num_vec;
6b7c5b94 2051
ac6a0c4a 2052 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2053
ac6a0c4a 2054 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2055 adapter->msix_entries[i].entry = i;
2056
ac6a0c4a 2057 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2058 if (status == 0) {
2059 goto done;
2060 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2061 num_vec = status;
3abcdeda 2062 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2063 num_vec) == 0)
3abcdeda 2064 goto done;
3abcdeda
SP
2065 }
2066 return;
2067done:
ac6a0c4a
SP
2068 adapter->num_msix_vec = num_vec;
2069 return;
6b7c5b94
SP
2070}
2071
/* Enable SR-IOV on the PF when the num_vfs module parameter requests it.
 * The request is clamped to the TotalVFs value advertised in the SR-IOV
 * extended capability.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		/* Fix: pci_find_ext_capability() returns 0 when the
		 * capability is absent; reading config space at offset
		 * 0 + PCI_SRIOV_TOTAL_VF would fetch garbage.
		 */
		if (!pos)
			return;
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
2097
/* Tear down SR-IOV if it was enabled by be_sriov_enable(). */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}
2107
fe6d2a38
SP
2108static inline int be_msix_vec_get(struct be_adapter *adapter,
2109 struct be_eq_obj *eq_obj)
6b7c5b94 2110{
ecd62107 2111 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2112}
2113
b628bde2
SP
2114static int be_request_irq(struct be_adapter *adapter,
2115 struct be_eq_obj *eq_obj,
3abcdeda 2116 void *handler, char *desc, void *context)
6b7c5b94
SP
2117{
2118 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2119 int vec;
2120
2121 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2122 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2123 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2124}
2125
/* Free the MSI-X vector that was requested for this EQ. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
6b7c5b94 2132
b628bde2
SP
2133static int be_msix_register(struct be_adapter *adapter)
2134{
3abcdeda
SP
2135 struct be_rx_obj *rxo;
2136 int status, i;
2137 char qname[10];
b628bde2 2138
3abcdeda
SP
2139 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2140 adapter);
6b7c5b94
SP
2141 if (status)
2142 goto err;
2143
3abcdeda
SP
2144 for_all_rx_queues(adapter, rxo, i) {
2145 sprintf(qname, "rxq%d", i);
2146 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2147 qname, rxo);
2148 if (status)
2149 goto err_msix;
2150 }
b628bde2 2151
6b7c5b94 2152 return 0;
b628bde2 2153
3abcdeda
SP
2154err_msix:
2155 be_free_irq(adapter, &adapter->tx_eq, adapter);
2156
2157 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2158 be_free_irq(adapter, &rxo->rx_eq, rxo);
2159
6b7c5b94
SP
2160err:
2161 dev_warn(&adapter->pdev->dev,
2162 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2163 be_msix_disable(adapter);
6b7c5b94
SP
2164 return status;
2165}
2166
2167static int be_irq_register(struct be_adapter *adapter)
2168{
2169 struct net_device *netdev = adapter->netdev;
2170 int status;
2171
ac6a0c4a 2172 if (msix_enabled(adapter)) {
6b7c5b94
SP
2173 status = be_msix_register(adapter);
2174 if (status == 0)
2175 goto done;
ba343c77
SB
2176 /* INTx is not supported for VF */
2177 if (!be_physfn(adapter))
2178 return status;
6b7c5b94
SP
2179 }
2180
2181 /* INTx */
2182 netdev->irq = adapter->pdev->irq;
2183 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2184 adapter);
2185 if (status) {
2186 dev_err(&adapter->pdev->dev,
2187 "INTx request IRQ failed - err %d\n", status);
2188 return status;
2189 }
2190done:
2191 adapter->isr_registered = true;
2192 return 0;
2193}
2194
2195static void be_irq_unregister(struct be_adapter *adapter)
2196{
2197 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2198 struct be_rx_obj *rxo;
2199 int i;
6b7c5b94
SP
2200
2201 if (!adapter->isr_registered)
2202 return;
2203
2204 /* INTx */
ac6a0c4a 2205 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2206 free_irq(netdev->irq, adapter);
2207 goto done;
2208 }
2209
2210 /* MSIx */
3abcdeda
SP
2211 be_free_irq(adapter, &adapter->tx_eq, adapter);
2212
2213 for_all_rx_queues(adapter, rxo, i)
2214 be_free_irq(adapter, &rxo->rx_eq, rxo);
2215
6b7c5b94
SP
2216done:
2217 adapter->isr_registered = false;
6b7c5b94
SP
2218}
2219
482c9e79
SP
2220static void be_rx_queues_clear(struct be_adapter *adapter)
2221{
2222 struct be_queue_info *q;
2223 struct be_rx_obj *rxo;
2224 int i;
2225
2226 for_all_rx_queues(adapter, rxo, i) {
2227 q = &rxo->q;
2228 if (q->created) {
2229 be_cmd_rxq_destroy(adapter, q);
2230 /* After the rxq is invalidated, wait for a grace time
2231 * of 1ms for all dma to end and the flush compl to
2232 * arrive
2233 */
2234 mdelay(1);
2235 be_rx_q_clean(adapter, rxo);
2236 }
2237
2238 /* Clear any residual events */
2239 q = &rxo->rx_eq.q;
2240 if (q->created)
2241 be_eq_clean(adapter, &rxo->rx_eq);
2242 }
2243}
2244
889cd4b2
SP
2245static int be_close(struct net_device *netdev)
2246{
2247 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2248 struct be_rx_obj *rxo;
3c8def97 2249 struct be_tx_obj *txo;
889cd4b2 2250 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2251 int vec, i;
889cd4b2 2252
889cd4b2
SP
2253 be_async_mcc_disable(adapter);
2254
fe6d2a38
SP
2255 if (!lancer_chip(adapter))
2256 be_intr_set(adapter, false);
889cd4b2 2257
63fcb27f
PR
2258 for_all_rx_queues(adapter, rxo, i)
2259 napi_disable(&rxo->rx_eq.napi);
2260
2261 napi_disable(&tx_eq->napi);
2262
2263 if (lancer_chip(adapter)) {
63fcb27f
PR
2264 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2265 for_all_rx_queues(adapter, rxo, i)
2266 be_cq_notify(adapter, rxo->cq.id, false, 0);
3c8def97
SP
2267 for_all_tx_queues(adapter, txo, i)
2268 be_cq_notify(adapter, txo->cq.id, false, 0);
63fcb27f
PR
2269 }
2270
ac6a0c4a 2271 if (msix_enabled(adapter)) {
fe6d2a38 2272 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2273 synchronize_irq(vec);
3abcdeda
SP
2274
2275 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2276 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2277 synchronize_irq(vec);
2278 }
889cd4b2
SP
2279 } else {
2280 synchronize_irq(netdev->irq);
2281 }
2282 be_irq_unregister(adapter);
2283
889cd4b2
SP
2284 /* Wait for all pending tx completions to arrive so that
2285 * all tx skbs are freed.
2286 */
3c8def97
SP
2287 for_all_tx_queues(adapter, txo, i)
2288 be_tx_compl_clean(adapter, txo);
889cd4b2 2289
482c9e79
SP
2290 be_rx_queues_clear(adapter);
2291 return 0;
2292}
2293
2294static int be_rx_queues_setup(struct be_adapter *adapter)
2295{
2296 struct be_rx_obj *rxo;
2297 int rc, i;
2298 u8 rsstable[MAX_RSS_QS];
2299
2300 for_all_rx_queues(adapter, rxo, i) {
2301 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2302 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2303 adapter->if_handle,
2304 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2305 if (rc)
2306 return rc;
2307 }
2308
2309 if (be_multi_rxq(adapter)) {
2310 for_all_rss_queues(adapter, rxo, i)
2311 rsstable[i] = rxo->rss_id;
2312
2313 rc = be_cmd_rss_config(adapter, rsstable,
2314 adapter->num_rx_qs - 1);
2315 if (rc)
2316 return rc;
2317 }
2318
2319 /* First time posting */
2320 for_all_rx_queues(adapter, rxo, i) {
2321 be_post_rx_frags(rxo, GFP_KERNEL);
2322 napi_enable(&rxo->rx_eq.napi);
2323 }
889cd4b2
SP
2324 return 0;
2325}
2326
6b7c5b94
SP
2327static int be_open(struct net_device *netdev)
2328{
2329 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2330 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2331 struct be_rx_obj *rxo;
3abcdeda 2332 int status, i;
5fb379ee 2333
482c9e79
SP
2334 status = be_rx_queues_setup(adapter);
2335 if (status)
2336 goto err;
2337
5fb379ee
SP
2338 napi_enable(&tx_eq->napi);
2339
2340 be_irq_register(adapter);
2341
fe6d2a38
SP
2342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, true);
5fb379ee
SP
2344
2345 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2346 for_all_rx_queues(adapter, rxo, i) {
2347 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2348 be_cq_notify(adapter, rxo->cq.id, true, 0);
2349 }
8788fdc2 2350 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2351
7a1e9b20
SP
2352 /* Now that interrupts are on we can process async mcc */
2353 be_async_mcc_enable(adapter);
2354
889cd4b2 2355 if (be_physfn(adapter)) {
1da87b7f 2356 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2357 if (status)
2358 goto err;
4f2aa89c 2359
ba343c77
SB
2360 status = be_cmd_set_flow_control(adapter,
2361 adapter->tx_fc, adapter->rx_fc);
2362 if (status)
889cd4b2 2363 goto err;
ba343c77 2364 }
4f2aa89c 2365
889cd4b2
SP
2366 return 0;
2367err:
2368 be_close(adapter->netdev);
2369 return -EIO;
5fb379ee
SP
2370}
2371
71d8d1b5
AK
2372static int be_setup_wol(struct be_adapter *adapter, bool enable)
2373{
2374 struct be_dma_mem cmd;
2375 int status = 0;
2376 u8 mac[ETH_ALEN];
2377
2378 memset(mac, 0, ETH_ALEN);
2379
2380 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2381 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2382 GFP_KERNEL);
71d8d1b5
AK
2383 if (cmd.va == NULL)
2384 return -1;
2385 memset(cmd.va, 0, cmd.size);
2386
2387 if (enable) {
2388 status = pci_write_config_dword(adapter->pdev,
2389 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2390 if (status) {
2391 dev_err(&adapter->pdev->dev,
2381a55c 2392 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2393 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2394 cmd.dma);
71d8d1b5
AK
2395 return status;
2396 }
2397 status = be_cmd_enable_magic_wol(adapter,
2398 adapter->netdev->dev_addr, &cmd);
2399 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2400 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2401 } else {
2402 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2403 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2404 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2405 }
2406
2b7bcebf 2407 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2408 return status;
2409}
2410
6d87f5c3
AK
2411/*
2412 * Generate a seed MAC address from the PF MAC Address using jhash.
2413 * MAC Address for VFs are assigned incrementally starting from the seed.
2414 * These addresses are programmed in the ASIC by the PF and the VF driver
2415 * queries for the MAC address during its probe.
2416 */
2417static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2418{
2419 u32 vf = 0;
3abcdeda 2420 int status = 0;
6d87f5c3
AK
2421 u8 mac[ETH_ALEN];
2422
2423 be_vf_eth_addr_generate(adapter, mac);
2424
2425 for (vf = 0; vf < num_vfs; vf++) {
2426 status = be_cmd_pmac_add(adapter, mac,
2427 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2428 &adapter->vf_cfg[vf].vf_pmac_id,
2429 vf + 1);
6d87f5c3
AK
2430 if (status)
2431 dev_err(&adapter->pdev->dev,
2432 "Mac address add failed for VF %d\n", vf);
2433 else
2434 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2435
2436 mac[5] += 1;
2437 }
2438 return status;
2439}
2440
2441static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2442{
2443 u32 vf;
2444
2445 for (vf = 0; vf < num_vfs; vf++) {
2446 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2447 be_cmd_pmac_del(adapter,
2448 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2449 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3
AK
2450 }
2451}
2452
5fb379ee
SP
2453static int be_setup(struct be_adapter *adapter)
2454{
5fb379ee 2455 struct net_device *netdev = adapter->netdev;
ba343c77 2456 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2457 int status;
ba343c77
SB
2458 u8 mac[ETH_ALEN];
2459
2dc1deb6
SP
2460 be_cmd_req_native_mode(adapter);
2461
f21b538c
PR
2462 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2463 BE_IF_FLAGS_BROADCAST |
2464 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2465
ba343c77
SB
2466 if (be_physfn(adapter)) {
2467 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2468 BE_IF_FLAGS_PROMISCUOUS |
2469 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2470 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2471
ac6a0c4a 2472 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2473 cap_flags |= BE_IF_FLAGS_RSS;
2474 en_flags |= BE_IF_FLAGS_RSS;
2475 }
ba343c77 2476 }
73d540f2
SP
2477
2478 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2479 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2480 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2481 if (status != 0)
2482 goto do_none;
2483
ba343c77 2484 if (be_physfn(adapter)) {
c99ac3e7
AK
2485 if (adapter->sriov_enabled) {
2486 while (vf < num_vfs) {
2487 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2488 BE_IF_FLAGS_BROADCAST;
2489 status = be_cmd_if_create(adapter, cap_flags,
2490 en_flags, mac, true,
64600ea5 2491 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2492 NULL, vf+1);
c99ac3e7
AK
2493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "Interface Create failed for VF %d\n",
2496 vf);
2497 goto if_destroy;
2498 }
2499 adapter->vf_cfg[vf].vf_pmac_id =
2500 BE_INVALID_PMAC_ID;
2501 vf++;
ba343c77 2502 }
84e5b9f7 2503 }
c99ac3e7 2504 } else {
ba343c77
SB
2505 status = be_cmd_mac_addr_query(adapter, mac,
2506 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2507 if (!status) {
2508 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2509 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2510 }
2511 }
2512
6b7c5b94
SP
2513 status = be_tx_queues_create(adapter);
2514 if (status != 0)
2515 goto if_destroy;
2516
2517 status = be_rx_queues_create(adapter);
2518 if (status != 0)
2519 goto tx_qs_destroy;
2520
2903dd65
SP
2521 /* Allow all priorities by default. A GRP5 evt may modify this */
2522 adapter->vlan_prio_bmap = 0xff;
2523
5fb379ee
SP
2524 status = be_mcc_queues_create(adapter);
2525 if (status != 0)
2526 goto rx_qs_destroy;
6b7c5b94 2527
0dffc83e
AK
2528 adapter->link_speed = -1;
2529
04b71175 2530 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10
SK
2531
2532 pcie_set_readrq(adapter->pdev, 4096);
6b7c5b94
SP
2533 return 0;
2534
5fb379ee
SP
2535rx_qs_destroy:
2536 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2537tx_qs_destroy:
2538 be_tx_queues_destroy(adapter);
2539if_destroy:
c99ac3e7
AK
2540 if (be_physfn(adapter) && adapter->sriov_enabled)
2541 for (vf = 0; vf < num_vfs; vf++)
2542 if (adapter->vf_cfg[vf].vf_if_handle)
2543 be_cmd_if_destroy(adapter,
658681f7
AK
2544 adapter->vf_cfg[vf].vf_if_handle,
2545 vf + 1);
2546 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2547do_none:
2548 return status;
2549}
2550
5fb379ee
SP
2551static int be_clear(struct be_adapter *adapter)
2552{
7ab8b0b4
AK
2553 int vf;
2554
c99ac3e7 2555 if (be_physfn(adapter) && adapter->sriov_enabled)
6d87f5c3
AK
2556 be_vf_eth_addr_rem(adapter);
2557
1a8887d8 2558 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2559 be_rx_queues_destroy(adapter);
2560 be_tx_queues_destroy(adapter);
1f5db833 2561 adapter->eq_next_idx = 0;
5fb379ee 2562
7ab8b0b4
AK
2563 if (be_physfn(adapter) && adapter->sriov_enabled)
2564 for (vf = 0; vf < num_vfs; vf++)
2565 if (adapter->vf_cfg[vf].vf_if_handle)
2566 be_cmd_if_destroy(adapter,
2567 adapter->vf_cfg[vf].vf_if_handle,
2568 vf + 1);
2569
658681f7 2570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2571
2dc1deb6
SP
2572 adapter->be3_native = 0;
2573
2243e2e9
SP
2574 /* tell fw we're done with firing cmds */
2575 be_cmd_fw_clean(adapter);
5fb379ee
SP
2576 return 0;
2577}
2578
6b7c5b94 2579
84517482 2580#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2581static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2582 const u8 *p, u32 img_start, int image_size,
2583 int hdr_size)
fa9a6fed
SB
2584{
2585 u32 crc_offset;
2586 u8 flashed_crc[4];
2587 int status;
3f0d4560
AK
2588
2589 crc_offset = hdr_size + img_start + image_size - 4;
2590
fa9a6fed 2591 p += crc_offset;
3f0d4560
AK
2592
2593 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2594 (image_size - 4));
fa9a6fed
SB
2595 if (status) {
2596 dev_err(&adapter->pdev->dev,
2597 "could not get crc from flash, not flashing redboot\n");
2598 return false;
2599 }
2600
2601 /*update redboot only if crc does not match*/
2602 if (!memcmp(flashed_crc, p, 4))
2603 return false;
2604 else
2605 return true;
fa9a6fed
SB
2606}
2607
306f1348
SP
2608static bool phy_flashing_required(struct be_adapter *adapter)
2609{
2610 int status = 0;
2611 struct be_phy_info phy_info;
2612
2613 status = be_cmd_get_phy_info(adapter, &phy_info);
2614 if (status)
2615 return false;
2616 if ((phy_info.phy_type == TN_8022) &&
2617 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2618 return true;
2619 }
2620 return false;
2621}
2622
3f0d4560 2623static int be_flash_data(struct be_adapter *adapter,
84517482 2624 const struct firmware *fw,
3f0d4560
AK
2625 struct be_dma_mem *flash_cmd, int num_of_images)
2626
84517482 2627{
3f0d4560
AK
2628 int status = 0, i, filehdr_size = 0;
2629 u32 total_bytes = 0, flash_op;
84517482
AK
2630 int num_bytes;
2631 const u8 *p = fw->data;
2632 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2633 const struct flash_comp *pflashcomp;
9fe96934 2634 int num_comp;
3f0d4560 2635
306f1348 2636 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2637 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2638 FLASH_IMAGE_MAX_SIZE_g3},
2639 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2640 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2641 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2642 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2644 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2645 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2646 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2647 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2648 FLASH_IMAGE_MAX_SIZE_g3},
2649 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2650 FLASH_IMAGE_MAX_SIZE_g3},
2651 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2652 FLASH_IMAGE_MAX_SIZE_g3},
2653 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2654 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2655 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2656 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2657 };
215faf9c 2658 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2659 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2660 FLASH_IMAGE_MAX_SIZE_g2},
2661 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2662 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2663 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2664 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2665 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2666 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2667 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2668 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2669 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2670 FLASH_IMAGE_MAX_SIZE_g2},
2671 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2672 FLASH_IMAGE_MAX_SIZE_g2},
2673 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2674 FLASH_IMAGE_MAX_SIZE_g2}
2675 };
2676
2677 if (adapter->generation == BE_GEN3) {
2678 pflashcomp = gen3_flash_types;
2679 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2680 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2681 } else {
2682 pflashcomp = gen2_flash_types;
2683 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2684 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2685 }
9fe96934
SB
2686 for (i = 0; i < num_comp; i++) {
2687 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2688 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2689 continue;
306f1348
SP
2690 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2691 if (!phy_flashing_required(adapter))
2692 continue;
2693 }
3f0d4560
AK
2694 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2695 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2696 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2697 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2698 continue;
2699 p = fw->data;
2700 p += filehdr_size + pflashcomp[i].offset
2701 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2702 if (p + pflashcomp[i].size > fw->data + fw->size)
2703 return -1;
2704 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2705 while (total_bytes) {
2706 if (total_bytes > 32*1024)
2707 num_bytes = 32*1024;
2708 else
2709 num_bytes = total_bytes;
2710 total_bytes -= num_bytes;
306f1348
SP
2711 if (!total_bytes) {
2712 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2713 flash_op = FLASHROM_OPER_PHY_FLASH;
2714 else
2715 flash_op = FLASHROM_OPER_FLASH;
2716 } else {
2717 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2718 flash_op = FLASHROM_OPER_PHY_SAVE;
2719 else
2720 flash_op = FLASHROM_OPER_SAVE;
2721 }
3f0d4560
AK
2722 memcpy(req->params.data_buf, p, num_bytes);
2723 p += num_bytes;
2724 status = be_cmd_write_flashrom(adapter, flash_cmd,
2725 pflashcomp[i].optype, flash_op, num_bytes);
2726 if (status) {
306f1348
SP
2727 if ((status == ILLEGAL_IOCTL_REQ) &&
2728 (pflashcomp[i].optype ==
2729 IMG_TYPE_PHY_FW))
2730 break;
3f0d4560
AK
2731 dev_err(&adapter->pdev->dev,
2732 "cmd to write to flash rom failed.\n");
2733 return -1;
2734 }
84517482 2735 }
84517482 2736 }
84517482
AK
2737 return 0;
2738}
2739
3f0d4560
AK
2740static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2741{
2742 if (fhdr == NULL)
2743 return 0;
2744 if (fhdr->build[0] == '3')
2745 return BE_GEN3;
2746 else if (fhdr->build[0] == '2')
2747 return BE_GEN2;
2748 else
2749 return 0;
2750}
2751
485bf569
SN
2752static int lancer_fw_download(struct be_adapter *adapter,
2753 const struct firmware *fw)
84517482 2754{
485bf569
SN
2755#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2756#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2757 struct be_dma_mem flash_cmd;
485bf569
SN
2758 const u8 *data_ptr = NULL;
2759 u8 *dest_image_ptr = NULL;
2760 size_t image_size = 0;
2761 u32 chunk_size = 0;
2762 u32 data_written = 0;
2763 u32 offset = 0;
2764 int status = 0;
2765 u8 add_status = 0;
84517482 2766
485bf569 2767 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2768 dev_err(&adapter->pdev->dev,
485bf569
SN
2769 "FW Image not properly aligned. "
2770 "Length must be 4 byte aligned.\n");
2771 status = -EINVAL;
2772 goto lancer_fw_exit;
d9efd2af
SB
2773 }
2774
485bf569
SN
2775 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2776 + LANCER_FW_DOWNLOAD_CHUNK;
2777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2778 &flash_cmd.dma, GFP_KERNEL);
2779 if (!flash_cmd.va) {
2780 status = -ENOMEM;
2781 dev_err(&adapter->pdev->dev,
2782 "Memory allocation failure while flashing\n");
2783 goto lancer_fw_exit;
2784 }
84517482 2785
485bf569
SN
2786 dest_image_ptr = flash_cmd.va +
2787 sizeof(struct lancer_cmd_req_write_object);
2788 image_size = fw->size;
2789 data_ptr = fw->data;
2790
2791 while (image_size) {
2792 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2793
2794 /* Copy the image chunk content. */
2795 memcpy(dest_image_ptr, data_ptr, chunk_size);
2796
2797 status = lancer_cmd_write_object(adapter, &flash_cmd,
2798 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2799 &data_written, &add_status);
2800
2801 if (status)
2802 break;
2803
2804 offset += data_written;
2805 data_ptr += data_written;
2806 image_size -= data_written;
2807 }
2808
2809 if (!status) {
2810 /* Commit the FW written */
2811 status = lancer_cmd_write_object(adapter, &flash_cmd,
2812 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2813 &data_written, &add_status);
2814 }
2815
2816 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2817 flash_cmd.dma);
2818 if (status) {
2819 dev_err(&adapter->pdev->dev,
2820 "Firmware load error. "
2821 "Status code: 0x%x Additional Status: 0x%x\n",
2822 status, add_status);
2823 goto lancer_fw_exit;
2824 }
2825
2826 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2827lancer_fw_exit:
2828 return status;
2829}
2830
2831static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2832{
2833 struct flash_file_hdr_g2 *fhdr;
2834 struct flash_file_hdr_g3 *fhdr3;
2835 struct image_hdr *img_hdr_ptr = NULL;
2836 struct be_dma_mem flash_cmd;
2837 const u8 *p;
2838 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2839
2840 p = fw->data;
3f0d4560 2841 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2842
84517482 2843 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2844 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2845 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2846 if (!flash_cmd.va) {
2847 status = -ENOMEM;
2848 dev_err(&adapter->pdev->dev,
2849 "Memory allocation failure while flashing\n");
485bf569 2850 goto be_fw_exit;
84517482
AK
2851 }
2852
3f0d4560
AK
2853 if ((adapter->generation == BE_GEN3) &&
2854 (get_ufigen_type(fhdr) == BE_GEN3)) {
2855 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2856 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2857 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2858 img_hdr_ptr = (struct image_hdr *) (fw->data +
2859 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2860 i * sizeof(struct image_hdr)));
2861 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2862 status = be_flash_data(adapter, fw, &flash_cmd,
2863 num_imgs);
3f0d4560
AK
2864 }
2865 } else if ((adapter->generation == BE_GEN2) &&
2866 (get_ufigen_type(fhdr) == BE_GEN2)) {
2867 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2868 } else {
2869 dev_err(&adapter->pdev->dev,
2870 "UFI and Interface are not compatible for flashing\n");
2871 status = -1;
84517482
AK
2872 }
2873
2b7bcebf
IV
2874 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2875 flash_cmd.dma);
84517482
AK
2876 if (status) {
2877 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2878 goto be_fw_exit;
84517482
AK
2879 }
2880
af901ca1 2881 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2882
485bf569
SN
2883be_fw_exit:
2884 return status;
2885}
2886
/* Entry point for user-initiated firmware flashing (e.g. via ethtool).
 *
 * Flashing is refused while the interface is down, because the flash
 * commands need a live function. Otherwise the named file is fetched
 * via request_firmware() and dispatched to the Lancer- or BE-specific
 * download routine.
 *
 * Returns 0 on success or a negative error.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* NOTE(review): relies on request_firmware() leaving *fw NULL on
	 * failure so release_firmware(fw) is a safe no-op — confirm against
	 * the firmware loader implementation.
	 */
fw_exit:
	release_firmware(fw);
	return status;
}
2913
6b7c5b94
SP
/* netdev callbacks implemented by this driver; installed on the net
 * device via BE_SET_NETDEV_OPS() in be_netdev_init(). Includes the
 * SR-IOV ndo_set_vf_* hooks for per-VF MAC/VLAN/rate configuration.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2930
/* One-time net_device initialization before register_netdev():
 * advertise offload features, set default flow-control state, hook up
 * netdev/ethtool ops and register the per-queue NAPI contexts.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* User-toggleable offloads (checksum, TSO, VLAN tx insertion) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing only makes sense with multiple RX queues (RSS) */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN rx strip/filter
	 * are always on and not user-toggleable here.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX queue, plus one shared TX/MCC context */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2968
/* Unmap whichever PCI BARs be_map_pci_bars() mapped. Either pointer
 * may be NULL (e.g. csr is only mapped on the PF, and neither is set
 * on a mapping failure path), hence the guards.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}
2976
/* Map the PCI BARs this function needs.
 *
 * Lancer: only BAR 0 (doorbells). BE2/BE3: the PF maps BAR 2 (CSR);
 * both PF and VF map a doorbell BAR whose number depends on generation
 * and function type (GEN3 VFs use BAR 0, everything else BAR 4).
 *
 * Returns 0 on success, -ENOMEM if any ioremap fails (already-mapped
 * BARs are unwound via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	/* CSR space exists only on the physical function */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Select the doorbell BAR: GEN2 and GEN3-PF use BAR 4, GEN3-VF BAR 0 */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3018
3019
/* Undo be_ctrl_init(): unmap PCI BARs and free the mailbox and
 * rx-filter DMA buffers. Safe to call on partially-initialized state
 * (unset va pointers are skipped).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3035
6b7c5b94
SP
/* Set up the low-level control path: map PCI BARs, allocate the
 * 16-byte-aligned mailbox DMA buffer (hardware requires alignment, so
 * we over-allocate by 16 and align into it) and the rx-filter command
 * buffer, then initialize the mailbox/MCC locks.
 *
 * Returns 0 on success or a negative error; on failure everything
 * acquired so far is released via the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* +16 so the aligned view below always fits a full mailbox */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned alias actually handed to hardware */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config state is restored after EEH / FLR recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3088
3089static void be_stats_cleanup(struct be_adapter *adapter)
3090{
3abcdeda 3091 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3092
3093 if (cmd->va)
2b7bcebf
IV
3094 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3095 cmd->va, cmd->dma);
6b7c5b94
SP
3096}
3097
3098static int be_stats_init(struct be_adapter *adapter)
3099{
3abcdeda 3100 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3101
005d5696 3102 if (adapter->generation == BE_GEN2) {
89a88ab8 3103 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3104 } else {
3105 if (lancer_chip(adapter))
3106 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3107 else
3108 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3109 }
2b7bcebf
IV
3110 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3111 GFP_KERNEL);
6b7c5b94
SP
3112 if (cmd->va == NULL)
3113 return -1;
d291b9af 3114 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3115 return 0;
3116}
3117
/* PCI remove callback: tear down everything be_probe() set up, in
 * reverse order. The worker is cancelled first so it cannot run
 * against half-destroyed state.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3146
2243e2e9 3147static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3148{
6b7c5b94 3149 int status;
2243e2e9 3150 u8 mac[ETH_ALEN];
6b7c5b94 3151
3abcdeda
SP
3152 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3153 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3154 if (status)
3155 return status;
3156
2243e2e9 3157 memset(mac, 0, ETH_ALEN);
ba343c77 3158
12f4d0a8
ME
3159 /* A default permanent address is given to each VF for Lancer*/
3160 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3161 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3162 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 3163
ba343c77
SB
3164 if (status)
3165 return status;
ca9e4988 3166
ba343c77
SB
3167 if (!is_valid_ether_addr(mac))
3168 return -EADDRNOTAVAIL;
3169
3170 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3171 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3172 }
6b7c5b94 3173
3486be29 3174 if (adapter->function_mode & 0x400)
82903e4b
AK
3175 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3176 else
3177 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3178
9e1453c5
AK
3179 status = be_cmd_get_cntl_attributes(adapter);
3180 if (status)
3181 return status;
3182
3c8def97
SP
3183 if ((num_vfs && adapter->sriov_enabled) ||
3184 (adapter->function_mode & 0x400) ||
3185 lancer_chip(adapter) || !be_physfn(adapter)) {
3186 adapter->num_tx_qs = 1;
3187 netif_set_real_num_tx_queues(adapter->netdev,
3188 adapter->num_tx_qs);
3189 } else {
3190 adapter->num_tx_qs = MAX_TX_QS;
3191 }
3192
2243e2e9 3193 return 0;
6b7c5b94
SP
3194}
3195
fe6d2a38
SP
/* Determine the adapter generation (BE_GEN2/BE_GEN3) from the PCI
 * device ID. For the Lancer-class IDs (OC_DEVICE_ID3/4) the SLI_INTF
 * register is also validated and the SLI family recorded.
 *
 * Returns 0 on success, -EINVAL if the SLI_INTF register contents are
 * invalid for a Lancer device.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3230
37eed1cb
PR
3231static int lancer_wait_ready(struct be_adapter *adapter)
3232{
3233#define SLIPORT_READY_TIMEOUT 500
3234 u32 sliport_status;
3235 int status = 0, i;
3236
3237 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3238 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3239 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3240 break;
3241
3242 msleep(20);
3243 }
3244
3245 if (i == SLIPORT_READY_TIMEOUT)
3246 status = -1;
3247
3248 return status;
3249}
3250
/* Wait for the Lancer port to become ready; if the port reports an
 * error with "reset needed", trigger the port-level reset via
 * SLIPORT_CONTROL and wait again, verifying the error/reset bits have
 * cleared. Returns 0 when the port is healthy, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Ask the port to reinitialize itself */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without reset-needed (or vice versa) is not
			 * recoverable here.
			 */
			status = -1;
		}
	}
	return status;
}
3278
6b7c5b94
SP
3279static int __devinit be_probe(struct pci_dev *pdev,
3280 const struct pci_device_id *pdev_id)
3281{
3282 int status = 0;
3283 struct be_adapter *adapter;
3284 struct net_device *netdev;
6b7c5b94
SP
3285
3286 status = pci_enable_device(pdev);
3287 if (status)
3288 goto do_none;
3289
3290 status = pci_request_regions(pdev, DRV_NAME);
3291 if (status)
3292 goto disable_dev;
3293 pci_set_master(pdev);
3294
3c8def97 3295 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3296 if (netdev == NULL) {
3297 status = -ENOMEM;
3298 goto rel_reg;
3299 }
3300 adapter = netdev_priv(netdev);
3301 adapter->pdev = pdev;
3302 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3303
3304 status = be_dev_family_check(adapter);
63657b9c 3305 if (status)
fe6d2a38
SP
3306 goto free_netdev;
3307
6b7c5b94 3308 adapter->netdev = netdev;
2243e2e9 3309 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3310
2b7bcebf 3311 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3312 if (!status) {
3313 netdev->features |= NETIF_F_HIGHDMA;
3314 } else {
2b7bcebf 3315 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3316 if (status) {
3317 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3318 goto free_netdev;
3319 }
3320 }
3321
ba343c77 3322 be_sriov_enable(adapter);
48f5a191
AK
3323 if (adapter->sriov_enabled) {
3324 adapter->vf_cfg = kcalloc(num_vfs,
3325 sizeof(struct be_vf_cfg), GFP_KERNEL);
3326
3327 if (!adapter->vf_cfg)
3328 goto free_netdev;
3329 }
ba343c77 3330
6b7c5b94
SP
3331 status = be_ctrl_init(adapter);
3332 if (status)
48f5a191 3333 goto free_vf_cfg;
6b7c5b94 3334
37eed1cb
PR
3335 if (lancer_chip(adapter)) {
3336 status = lancer_test_and_set_rdy_state(adapter);
3337 if (status) {
3338 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
48f5a191 3339 goto ctrl_clean;
37eed1cb
PR
3340 }
3341 }
3342
2243e2e9 3343 /* sync up with fw's ready state */
ba343c77
SB
3344 if (be_physfn(adapter)) {
3345 status = be_cmd_POST(adapter);
3346 if (status)
3347 goto ctrl_clean;
ba343c77 3348 }
6b7c5b94 3349
2243e2e9
SP
3350 /* tell fw we're ready to fire cmds */
3351 status = be_cmd_fw_init(adapter);
6b7c5b94 3352 if (status)
2243e2e9
SP
3353 goto ctrl_clean;
3354
a4b4dfab
AK
3355 status = be_cmd_reset_function(adapter);
3356 if (status)
3357 goto ctrl_clean;
556ae191 3358
2243e2e9
SP
3359 status = be_stats_init(adapter);
3360 if (status)
3361 goto ctrl_clean;
3362
3363 status = be_get_config(adapter);
6b7c5b94
SP
3364 if (status)
3365 goto stats_clean;
6b7c5b94 3366
b9ab82c7
SP
3367 /* The INTR bit may be set in the card when probed by a kdump kernel
3368 * after a crash.
3369 */
3370 if (!lancer_chip(adapter))
3371 be_intr_set(adapter, false);
3372
3abcdeda
SP
3373 be_msix_enable(adapter);
3374
6b7c5b94 3375 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3376
5fb379ee
SP
3377 status = be_setup(adapter);
3378 if (status)
3abcdeda 3379 goto msix_disable;
2243e2e9 3380
3abcdeda 3381 be_netdev_init(netdev);
6b7c5b94
SP
3382 status = register_netdev(netdev);
3383 if (status != 0)
5fb379ee 3384 goto unsetup;
6b7c5b94 3385
e6319365 3386 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42 3387 u8 mac_speed;
d0381c42
AK
3388 u16 vf, lnk_speed;
3389
12f4d0a8
ME
3390 if (!lancer_chip(adapter)) {
3391 status = be_vf_eth_addr_config(adapter);
3392 if (status)
3393 goto unreg_netdev;
3394 }
d0381c42
AK
3395
3396 for (vf = 0; vf < num_vfs; vf++) {
ea172a01
SP
3397 status = be_cmd_link_status_query(adapter, &mac_speed,
3398 &lnk_speed, vf + 1);
d0381c42
AK
3399 if (!status)
3400 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3401 else
3402 goto unreg_netdev;
3403 }
e6319365
AK
3404 }
3405
c4ca2374 3406 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3407
f203af70 3408 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3409 return 0;
3410
e6319365
AK
3411unreg_netdev:
3412 unregister_netdev(netdev);
5fb379ee
SP
3413unsetup:
3414 be_clear(adapter);
3abcdeda
SP
3415msix_disable:
3416 be_msix_disable(adapter);
6b7c5b94
SP
3417stats_clean:
3418 be_stats_cleanup(adapter);
3419ctrl_clean:
3420 be_ctrl_cleanup(adapter);
48f5a191
AK
3421free_vf_cfg:
3422 kfree(adapter->vf_cfg);
6b7c5b94 3423free_netdev:
ba343c77 3424 be_sriov_disable(adapter);
fe6d2a38 3425 free_netdev(netdev);
8d56ff11 3426 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3427rel_reg:
3428 pci_release_regions(pdev);
3429disable_dev:
3430 pci_disable_device(pdev);
3431do_none:
c4ca2374 3432 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3433 return status;
3434}
3435
/* PM suspend: stop the worker, optionally arm wake-on-LAN, close the
 * interface, record the current flow-control settings (restored by
 * be_setup() on resume) and power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Save flow-control config so resume can reapply it */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3460
/* PM resume: re-enable the device, redo the firmware handshake and
 * queue setup, reopen the interface if it was up, disarm WoL and
 * restart the worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here — a
	 * failed setup proceeds to be_open(); confirm whether that is
	 * intentional best-effort behavior.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3496
82456b03
SP
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed partway */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops all DMA before the machine goes down */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3518
cf588477
SP
/* EEH error_detected callback: mark the error, quiesce the interface
 * and tear down queues. Returns DISCONNECT for permanent failures,
 * otherwise NEED_RESET so be_eeh_reset() is invoked.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag checked elsewhere to suppress further hw access */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3545
/* EEH slot_reset callback: re-enable the device, restore its config
 * space and verify the firmware is ready via POST. Returns RECOVERED
 * so be_eeh_resume() runs, or DISCONNECT if the device is gone.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3569
/* EEH resume callback: redo the firmware handshake and queue setup,
 * reopen the interface if it was running, and reattach the netdev.
 * Failures are logged; there is no further recovery from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3599
/* PCI EEH (extended error handling) recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3605
6b7c5b94
SP
/* PCI driver descriptor tying the device-ID table to the probe/remove,
 * power-management, shutdown and EEH callbacks above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3616
3617static int __init be_init_module(void)
3618{
8e95a202
JP
3619 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3620 rx_frag_size != 2048) {
6b7c5b94
SP
3621 printk(KERN_WARNING DRV_NAME
3622 " : Module param rx_frag_size must be 2048/4096/8192."
3623 " Using 2048\n");
3624 rx_frag_size = 2048;
3625 }
6b7c5b94
SP
3626
3627 return pci_register_driver(&be_driver);
3628}
3629module_init(be_init_module);
3630
/* Module exit: unregister the PCI driver (triggers be_remove() for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
This page took 0.526942 seconds and 5 git commands to generate.