be2net: do not modify PCI MaxReadReq size
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

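/* Both parameters are read-only at runtime (S_IRUGO) and must be given at
 * load time; e.g. (hypothetical values, a usage sketch only):
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */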
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

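/* Doorbell helpers: each posts a queue-specific 32-bit value to the
 * register block mapped at adapter->db. For the RQ and TXQ doorbells the
 * wmb() ensures descriptors written to DMA memory are visible to the
 * device before the doorbell announces them.
 */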
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

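/* Fold a 16-bit HW counter, which wraps at 65535, into a 32-bit software
 * accumulator. Worked example: if the low half of *acc is 0xFFF0 and the
 * HW now reports 0x0005, val < lo(*acc) signals a wrap, so 65536 is added
 * on top of the new low half.
 */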
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

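/* Worked example for the WRB count below: an skb with a linear head and
 * two page frags needs 1 + 2 data WRBs plus one header WRB = 4; 4 is
 * even, so no dummy WRB is added. With a single frag the count would be
 * 3, and on non-Lancer chips a dummy WRB pads it to an even 4.
 */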
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

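/* Rewrite the 802.1p priority bits of the tag when the OS-chosen priority
 * is not set in vlan_prio_bmap; e.g. (hypothetically) priority 5 with a
 * bitmap of 0x0F would be replaced with adapter->recommended_prio.
 */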
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

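/* Sync the netdev filter state to the adapter: enter/leave promiscuous
 * mode, fall back to ALLMULTI when the multicast list exceeds BE_MAX_MC,
 * and reprogram the unicast MAC list when it changes (slot 0 of pmac_id
 * stays reserved for the primary MAC).
 */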
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

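/* Count this adapter's VFs by walking the PCI device list: each VF devfn
 * is derived from the PF devfn plus the SR-IOV capability's First VF
 * Offset and a multiple of VF Stride; e.g. (hypothetical values) offset
 * 128 and stride 4 would place VF n at devfn PF + 128 + 4 * n.
 */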
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

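/* Adaptive interrupt coalescing: once a second, re-derive the EQ delay
 * from the measured RX rate. Worked example: 220000 pkts/s gives
 * eqd = (220000 / 110000) << 3 = 16, which is then clamped to
 * [min_eqd, max_eqd]; anything below 10 is treated as no delay.
 */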
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

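/* The v1 and v0 parsers below decode the two RX completion descriptor
 * layouts; both normalize the fields into the chip-agnostic
 * struct be_rx_compl_info consumed by the RX paths above.
 */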
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
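/* Sketch of the carving: with rx_frag_size = 2048 and a (hypothetical)
 * 4 KB big page, each page yields two fragments; only the fragment that
 * exhausts the page sets last_page_user, so the page's DMA mapping is
 * torn down exactly once on the RX-completion side.
 */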
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

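/* Reclaim one sent skb: skip the header WRB, then unmap every data WRB
 * from the queue tail through last_index (the first data WRB is treated
 * as a dma_map_single() mapping when the skb had a linear head). Returns
 * the number of WRBs consumed, header WRB included, so the caller can
 * credit txq->used.
 */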
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

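/* Drain TX completions on all TX queues, waiting up to ~200ms for them to
 * arrive. Any wrbs still outstanding after that will never be completed,
 * so their skbs are reclaimed directly from the ring.
 */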
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

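/* NAPI handler for one EQ. An EQ services every TX/RX queue whose index is
 * congruent to eqo->idx modulo num_evt_qs. If any TX queue could not be
 * fully drained within its budget, max_work is forced to the full budget so
 * NAPI keeps polling instead of completing and re-arming the EQ.
 */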
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

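/* Check for an unrecoverable error (UE). Lancer exposes errors via the
 * SLIPORT status register; BE2/BE3 expose them via the UE status CSRs,
 * which are filtered through their mask registers. A detected error is
 * latched in the adapter state and each set bit is decoded by block name.
 */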
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	     !sriov_want(adapter) && be_physfn(adapter) &&
	     !be_is_mc(adapter))
		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	else
		return 0;
}

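/* Ask for one MSI-X vector per desired RSS ring (capped by the number of
 * online CPUs), plus extra vectors for RoCE when supported. If the platform
 * grants fewer vectors, a second pci_enable_msix() attempt is made with the
 * granted count; if that also fails, MSI-X stays disabled and the driver
 * falls back to INTx at IRQ-registration time.
 */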
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

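/* Create the RX rings: the default (non-RSS) ring first, as the FW expects,
 * then one ring per RSS queue. With multiple rings, the 128-entry RSS
 * indirection table is filled by striping the RSS ring ids round-robin
 * across its entries before it is programmed into the FW.
 */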
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

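/* Enable or disable Wake-on-LAN via magic packet. Enabling programs the
 * netdev MAC address as the magic-packet filter and marks the device
 * wake-capable in D3hot/D3cold; disabling programs a zero MAC and clears
 * the wake flags.
 */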
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

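/* Enable SR-IOV and provision each VF: create its interface, assign a MAC
 * (unless the VFs were already enabled, e.g. by a previous driver load),
 * and cache its link speed and default VLAN. num_vfs is clamped to the
 * limit reported by the device's SR-IOV capability.
 */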
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

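/* One-time function-level bring-up: create event, completion and MCC
 * queues, create the interface with its MAC address, apply VLAN, RX-mode
 * and flow-control settings, optionally enable SR-IOV, and finally start
 * the periodic worker.
 */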
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id[0], 0);
	if (status != 0)
		goto err;

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_add_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;

	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

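/* Flash every UFI component present in the image's flash section, using the
 * gen2 or gen3 component table as appropriate. Each component is written in
 * 32KB chunks; intermediate chunks use a SAVE opcode and only the final
 * chunk uses a FLASH (commit) opcode, so a component takes effect only once
 * it is fully transferred.
 */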
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

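/* Firmware download for Lancer. The image must be 4-byte aligned; it is
 * streamed to the "/prg" flash object in 32KB chunks via write-object
 * commands, and a final zero-length write at the end offset commits it.
 */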
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

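/* Map the PCI BARs used by this function. Lancer maps its doorbells from
 * BAR 0 (plus a separate RoCE BAR on SLI type-3 functions); BE2/BE3 map
 * the CSR BAR on the PF only and choose the doorbell BAR based on chip
 * generation and PF/VF status.
 */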
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

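/* Map BARs and allocate the control-path DMA memory: the mailbox (over-
 * allocated by 16 bytes so the command region can be placed on a 16-byte
 * boundary with PTR_ALIGN) and the rx_filter command buffer. Also
 * initializes the mailbox and MCC locks.
 */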
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

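/* Size the stats command buffer for the firmware interface in use:
 * the v0 request for BE2, the pport-stats request for Lancer, and
 * the v1 request otherwise.
 */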
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

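/* PCI remove path: detach the RoCE driver first, then unregister the
 * netdev, tear down the queues, release stats and control memory and,
 * last of all, free the netdev itself.
 */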
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

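/* Query the firmware's extended FAT capabilities and return the debug
 * level configured for the UART trace module, or 0 if it cannot be
 * determined.
 */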
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < cfgs->module[0].num_modes; j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

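/* One-time firmware configuration queries at probe: port and function
 * mode, VLAN and MAC table limits, controller attributes,
 * wake-on-LAN capability and the firmware log level.
 */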
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

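/* Derive the adapter generation (BE2/BE3/Lancer) from the PCI device
 * ID, validating the SLI_INTF register where the device ID alone is
 * not enough, and note whether this function is a VF.
 */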
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			!be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

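/* Poll the SLIPORT_STATUS register until the firmware reports ready,
 * sampling once a second for up to SLIPORT_READY_TIMEOUT seconds.
 */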
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

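/* If the port reports an error that requires a reset, trigger a
 * physical-port reset through SLIPORT_CONTROL and wait for the
 * adapter to come back ready and error-free.
 */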
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

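/* Called from the worker: if the port is in an error state, attempt a
 * full recovery; reset the port, tear down and rebuild the rings, and
 * reattach the netdev.
 */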
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. "
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

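/* Periodic (1 second) housekeeping: check for hardware errors, kick
 * off a stats request if none is pending, replenish any starved RX
 * rings and update the interrupt delay (be_eqd_update) on each EQ.
 */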
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

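/* As used in be_probe() below, a non-zero value in the custom
 * scratchpad CSR indicates that the function must be reset before it
 * can be used.
 */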
static bool be_reset_required(struct be_adapter *adapter)
{
	u32 reg;

	pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
	return reg;
}

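/* PCI probe: enable the device, allocate the netdev, map BARs and
 * control structures, bring the firmware to a ready state, then set
 * up the queues and register the netdev. Errors unwind in reverse
 * order through the labels at the bottom.
 */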
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

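/* Legacy PM suspend: arm wake-on-LAN if enabled, detach the netdev,
 * tear down the rings and put the device into the requested low
 * power state.
 */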
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

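/* EEH (PCI error recovery) callbacks: detach and tear down on error
 * detection, re-enable and POST the card on slot reset, then rebuild
 * and reattach on resume.
 */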
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

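/* Validate the rx_frag_size module parameter (2048, 4096 or 8192
 * bytes are accepted; anything else falls back to 2048) before
 * registering the PCI driver.
 */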
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);