be2net: Fix initialization sequence for Lancer

drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
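/*
 * Illustrative note (added commentary, not in the original source): each
 * doorbell write above packs the ring id into the low bits and flags/counts
 * into the upper bits. For example, re-arming a CQ after reaping three
 * completions amounts to:
 *
 *	val = (qid & DB_CQ_RING_ID_MASK) |
 *	      (1 << DB_CQ_REARM_SHIFT) |
 *	      (3 << DB_CQ_NUM_POPPED_SHIFT);
 *	iowrite32(val, adapter->db + DB_CQ_OFFSET);
 */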

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
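/*
 * Worked example (illustrative only, not in the original source): if *acc
 * holds 0x0001FFF0 (one previous wrap in the high word, 0xFFF0 in the low
 * word) and the HW counter now reads 0x0005, then val < lo(*acc) flags a
 * wrap, so the result is hi(*acc) + 5 + 65536 = 0x00020005 -- two wraps
 * plus the latest reading.
 */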

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
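/*
 * Example (illustrative, not from the original source): an skb with a
 * linear head and two page frags needs 1 + 2 data WRBs plus the header
 * WRB = 4, already even, so no dummy WRB is added. With a single frag the
 * count would be 3, and on BE2/BE3 a dummy WRB pads it to 4; Lancer has
 * no even-count requirement.
 */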

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
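/*
 * Example (illustrative only): for a tag of 0x6064 (priority 3, VID 100),
 * if bit 3 is clear in adapter->vlan_prio_bmap the priority bits are
 * rewritten to adapter->recommended_prio while the 12-bit VID is left
 * untouched.
 */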

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
		    dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
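/*
 * Sketch of the routing-ID arithmetic above (illustrative values, not from
 * the original source): with a VF offset of 16 and a stride of 2 read from
 * the SR-IOV capability, a PF at devfn 0 expects its VFs at devfn 16, 18,
 * 20, ... on the same bus; each match bumps vfs, and PCI_DEV_FLAGS_ASSIGNED
 * additionally bumps assigned_vfs.
 */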

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
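/*
 * Note (added commentary, not in the original source): the valid bit sits
 * at the same dword offset in both the v0 and v1 completion layouts, so
 * the check above works before the driver knows which format it has; only
 * after rmb() and the LE-to-CPU conversion does adapter->be3_native select
 * the matching parser.
 */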

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
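/*
 * Example of the page-splitting arithmetic above (illustrative, assuming
 * rx_frag_size = 2048 and a 16 KB "big page"): one page yields fragments
 * at offsets 0, 2048, ..., 14336. At offset 14336 the test
 * (14336 + 2 * 2048) > 16384 fires, so that fragment is marked
 * last_page_user and the next descriptor gets a freshly allocated page.
 */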
1552
5fb379ee 1553static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1554{
6b7c5b94
SP
1555 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556
1557 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558 return NULL;
1559
f3eb62d2 1560 rmb();
6b7c5b94
SP
1561 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562
1563 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564
1565 queue_tail_inc(tx_cq);
1566 return txcp;
1567}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
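
/* Hypothetical usage sketch: events_get() both counts and consumes EQ
 * entries, so every caller must hand the count back to hardware via
 * be_eq_notify(). This illustrative (unused) helper drains the EQ and
 * re-arms it in one step, combining the two calls the way
 * event_handle() below does for the armed case.
 */
static inline void be_eq_drain_and_rearm(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, true, true, num);
}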

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}
1646
10ef9ab4 1647static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1648{
1649 struct be_rx_page_info *page_info;
3abcdeda
SP
1650 struct be_queue_info *rxq = &rxo->q;
1651 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1652 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1653 u16 tail;
1654
1655 /* First cleanup pending rx completions */
3abcdeda 1656 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
10ef9ab4
SP
1657 be_rx_compl_discard(rxo, rxcp);
1658 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1659 }
1660
1661 /* Then free posted rx buffer that were not used */
1662 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1663 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1664 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1665 put_page(page_info->page);
1666 memset(page_info, 0, sizeof(*page_info));
1667 }
1668 BUG_ON(atomic_read(&rxq->used));
482c9e79 1669 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1670}
1671
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
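
/* Sketch of the ring-count rule above (an illustration, not driver code):
 * with more than one irq the driver creates one RSS ring per irq plus the
 * default non-RSS ring; with a single irq only the default ring exists.
 */
static inline int be_wanted_rx_rings(int irqs)
{
	return (irqs > 1) ? irqs + 1 : 1;
}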

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
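
/* Illustrative note on the NAPI accounting in be_poll(): returning less
 * than 'budget' allows napi_complete() and interrupt re-arm, while
 * returning the full budget keeps the EQ in polling mode; a TX queue
 * that does not finish within eqo->tx_budget therefore forces
 * max_work = budget. This hypothetical predicate just names that rule.
 */
static inline bool be_poll_done(int max_work, int budget)
{
	return max_work < budget;
}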

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
	    sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
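
/* Minimal sketch of the UE decode above: each set bit in the unmasked
 * status word indexes the ue_status_low_desc[]/ue_status_hi_desc[]
 * tables at the top of this file. Hypothetical, unused helper shown for
 * the low word only.
 */
static void __maybe_unused be_print_ue_lo_bits(struct be_adapter *adapter,
					       u32 ue_lo)
{
	u32 i;

	for (i = 0; ue_lo; ue_lo >>= 1, i++)
		if (ue_lo & 1)
			dev_err(&adapter->pdev->dev, "UE: %s bit set\n",
				ue_status_low_desc[i]);
}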

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_want(adapter) && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
				     (num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
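
/* Condensed sketch of the pci_enable_msix() retry contract used above
 * (the pre-3.14 kernel API): a positive return value is the vector
 * count that could have been allocated, so the call is retried once
 * with that smaller count. Unlike be_msix_enable(), this hypothetical
 * helper skips the BE_MIN_MSIX_VECTORS floor check for brevity.
 */
static int __maybe_unused be_enable_msix_sketch(struct be_adapter *adapter,
						int num_vec)
{
	int status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				     num_vec);

	if (status > 0)
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 status);
	return status;
}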

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
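
/* Illustrative equivalent (not driver code) of the nested indirection-table
 * fill in be_rx_qs_create() above: the rss_ids of the num_rx_qs - 1 RSS
 * rings are laid out round-robin across all 128 slots.
 */
static void __maybe_unused be_fill_rsstable_sketch(u8 *rsstable,
						   const u8 *rss_ids,
						   int num_rss)
{
	int i;

	for (i = 0; i < 128; i++)
		rsstable[i] = rss_ids[i % num_rss];
}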

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
				PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
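
/* Sketch of the VF address derivation above: VF n simply gets the jhash
 * seed MAC with its last octet advanced n times (mac[5] += 1 per loop
 * iteration), so the scheme assumes fewer than 256 VFs per seed. This
 * helper is hypothetical and unused.
 */
static inline void be_nth_vf_mac_sketch(u8 mac[ETH_ALEN], u8 vf)
{
	mac[5] += vf;
}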

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac,
						       MAC_ADDRESS_TYPE_NETWORK,
						       false, if_handle,
						       *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, true,
					       0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
				       pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype == OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
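
/* Illustrative arithmetic for the download loop above: the image is
 * streamed in LANCER_FW_DOWNLOAD_CHUNK pieces (advancing by the
 * firmware-reported data_written, so a short write simply resumes where
 * it stopped), and a final zero-length write commits the image.
 * Assuming every write lands in full, this hypothetical helper gives
 * the number of data chunks before the commit.
 */
static inline u32 lancer_fw_num_chunks(size_t image_size)
{
	return DIV_ROUND_UP(image_size, LANCER_FW_DOWNLOAD_CHUNK);
}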

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3364
6b7c5b94
SP
3365static void be_ctrl_cleanup(struct be_adapter *adapter)
3366{
8788fdc2 3367 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3368
3369 be_unmap_pci_bars(adapter);
3370
3371 if (mem->va)
3372 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3373 mem->dma);
e7b909a6 3374
5b8821b7 3375 mem = &adapter->rx_filter;
e7b909a6 3376 if (mem->va)
3377 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3378 mem->dma);
3379}
3380
3381static int be_ctrl_init(struct be_adapter *adapter)
3382{
3383 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3384 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3385 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3386 int status;
3387
3388 status = be_map_pci_bars(adapter);
3389 if (status)
e7b909a6 3390 goto done;
3391
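	/* Allocate 16 bytes of slack so the mailbox handed to hardware can
	 * be aligned to a 16-byte boundary via PTR_ALIGN below. Example: if
	 * dma_alloc_coherent() returns va ending in ...08, PTR_ALIGN(va, 16)
	 * advances it by 8 bytes, which still lies inside the allocation
	 * thanks to the extra slack.
	 */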
3392 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3393 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3394 mbox_mem_alloc->size,
3395 &mbox_mem_alloc->dma,
3396 GFP_KERNEL);
6b7c5b94 3397 if (!mbox_mem_alloc->va) {
3398 status = -ENOMEM;
3399 goto unmap_pci_bars;
3400 }
3401 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3402 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3403 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3404 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3405
3406 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3407 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3408 &rx_filter->dma, GFP_KERNEL);
3409 if (rx_filter->va == NULL) {
3410 status = -ENOMEM;
3411 goto free_mbox;
3412 }
5b8821b7 3413 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3414
2984961c 3415 mutex_init(&adapter->mbox_lock);
3416 spin_lock_init(&adapter->mcc_lock);
3417 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3418
dd131e76 3419 init_completion(&adapter->flash_compl);
cf588477 3420 pci_save_state(adapter->pdev);
6b7c5b94 3421 return 0;
3422
3423free_mbox:
3424 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3425 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3426
3427unmap_pci_bars:
3428 be_unmap_pci_bars(adapter);
3429
3430done:
3431 return status;
3432}
3433
3434static void be_stats_cleanup(struct be_adapter *adapter)
3435{
3abcdeda 3436 struct be_dma_mem *cmd = &adapter->stats_cmd;
3437
3438 if (cmd->va)
3439 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3440 cmd->va, cmd->dma);
3441}
3442
3443static int be_stats_init(struct be_adapter *adapter)
3444{
3abcdeda 3445 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3446
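	/* The GET_STATS request layout depends on the ASIC: v0 on GEN2
	 * chips, the pport variant on Lancer, and v1 otherwise; size the
	 * DMA buffer accordingly.
	 */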
005d5696 3447 if (adapter->generation == BE_GEN2) {
89a88ab8 3448 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3449 } else {
3450 if (lancer_chip(adapter))
3451 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3452 else
3453 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3454 }
3455 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3456 GFP_KERNEL);
3457 if (cmd->va == NULL)
 3458		return -ENOMEM;
d291b9af 3459 memset(cmd->va, 0, cmd->size);
3460 return 0;
3461}
3462
3463static void __devexit be_remove(struct pci_dev *pdev)
3464{
3465 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3466
3467 if (!adapter)
3468 return;
3469
3470 be_roce_dev_remove(adapter);
3471
3472 unregister_netdev(adapter->netdev);
3473
3474 be_clear(adapter);
3475
3476 /* tell fw we're done with firing cmds */
3477 be_cmd_fw_clean(adapter);
3478
3479 be_stats_cleanup(adapter);
3480
3481 be_ctrl_cleanup(adapter);
3482
3483 pci_set_drvdata(pdev, NULL);
3484 pci_release_regions(pdev);
3485 pci_disable_device(pdev);
3486
3487 free_netdev(adapter->netdev);
3488}
3489
3490bool be_is_wol_supported(struct be_adapter *adapter)
3491{
 3492	return (adapter->wol_cap & BE_WOL_CAP) &&
 3493		!be_is_wol_excluded(adapter);
3494}
3495
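/* Query the firmware's extended FAT capabilities and return the UART
 * trace level; be_get_initial_config() below uses it to seed the
 * driver's default msg_enable setting.
 */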
3496u32 be_get_fw_log_level(struct be_adapter *adapter)
3497{
3498 struct be_dma_mem extfat_cmd;
3499 struct be_fat_conf_params *cfgs;
3500 int status;
3501 u32 level = 0;
3502 int j;
3503
3504 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3505 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3506 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3507 &extfat_cmd.dma);
3508
3509 if (!extfat_cmd.va) {
3510 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3511 __func__);
3512 goto err;
3513 }
3514
3515 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3516 if (!status) {
3517 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3518 sizeof(struct be_cmd_resp_hdr));
3519 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3520 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3521 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3522 }
3523 }
3524 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3525 extfat_cmd.dma);
3526err:
3527 return level;
3528}
39f1d94d 3529static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 3530{
6b7c5b94 3531 int status;
941a77d5 3532 u32 level;
6b7c5b94 3533
3534 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3535 &adapter->function_mode, &adapter->function_caps);
3536 if (status)
3537 return status;
3538
752961a1 3539 if (adapter->function_mode & FLEX10_MODE)
456d9c96 3540 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3541 else
3542 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3543
3544 if (be_physfn(adapter))
3545 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3546 else
3547 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3548
3549 /* primary mac needs 1 pmac entry */
3550 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3551 sizeof(u32), GFP_KERNEL);
3552 if (!adapter->pmac_id)
3553 return -ENOMEM;
3554
3555 status = be_cmd_get_cntl_attributes(adapter);
3556 if (status)
3557 return status;
3558
3559 status = be_cmd_get_acpi_wol_cap(adapter);
3560 if (status) {
 3561		/* in case of a failure to get WOL capabilities,
 3562		 * check the exclusion list to determine WOL capability */
3563 if (!be_is_wol_excluded(adapter))
3564 adapter->wol_cap |= BE_WOL_CAP;
3565 }
3566
3567 if (be_is_wol_supported(adapter))
3568 adapter->wol = true;
3569
3570 /* Must be a power of 2 or else MODULO will BUG_ON */
3571 adapter->be_get_temp_freq = 64;
3572
3573 level = be_get_fw_log_level(adapter);
3574 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3575
2243e2e9 3576 return 0;
3577}
3578
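/* Derive the adapter generation from the PCI device ID; for Lancer and
 * Skyhawk IDs also validate the SLI_INTF register and record the SLI
 * family. Finally note whether this function is a VF.
 */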
39f1d94d 3579static int be_dev_type_check(struct be_adapter *adapter)
3580{
3581 struct pci_dev *pdev = adapter->pdev;
 3582	u32 sli_intf = 0;
3583
3584 switch (pdev->device) {
3585 case BE_DEVICE_ID1:
3586 case OC_DEVICE_ID1:
3587 adapter->generation = BE_GEN2;
3588 break;
3589 case BE_DEVICE_ID2:
3590 case OC_DEVICE_ID2:
3591 adapter->generation = BE_GEN3;
3592 break;
3593 case OC_DEVICE_ID3:
12f4d0a8 3594 case OC_DEVICE_ID4:
fe6d2a38 3595 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
 3596		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
 3597					SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3600 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3601 !be_type_2_3(adapter)) {
3602 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3603 return -EINVAL;
3604 }
3605 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3606 SLI_INTF_FAMILY_SHIFT);
3607 adapter->generation = BE_GEN3;
3608 break;
3609 case OC_DEVICE_ID5:
3610 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3611 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3612 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3613 return -EINVAL;
3614 }
3615 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3616 SLI_INTF_FAMILY_SHIFT);
3617 adapter->generation = BE_GEN3;
3618 break;
3619 default:
3620 adapter->generation = 0;
3621 }
3622
3623 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3624 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3625 return 0;
3626}
3627
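/* Lancer error recovery: poll SLIPORT_STATUS and, if the firmware
 * reports an error, reset the function to the ready state, then tear
 * down and rebuild the interface (be_clear()/be_setup()) before
 * re-attaching the netdev.
 */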
3628static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3629{
3630 int status;
3631 u32 sliport_status;
3632
3633 if (adapter->eeh_err || adapter->ue_detected)
3634 return;
3635
3636 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3637
3638 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3639 dev_err(&adapter->pdev->dev,
3640 "Adapter in error state."
3641 "Trying to recover.\n");
3642
3643 status = lancer_test_and_set_rdy_state(adapter);
3644 if (status)
3645 goto err;
3646
3647 netif_device_detach(adapter->netdev);
3648
3649 if (netif_running(adapter->netdev))
3650 be_close(adapter->netdev);
3651
3652 be_clear(adapter);
3653
3654 adapter->fw_timeout = false;
3655
3656 status = be_setup(adapter);
3657 if (status)
3658 goto err;
3659
3660 if (netif_running(adapter->netdev)) {
3661 status = be_open(adapter->netdev);
3662 if (status)
3663 goto err;
3664 }
3665
3666 netif_device_attach(adapter->netdev);
3667
3668 dev_err(&adapter->pdev->dev,
3669 "Adapter error recovery succeeded\n");
3670 }
3671 return;
3672err:
3673 dev_err(&adapter->pdev->dev,
3674 "Adapter error recovery failed\n");
3675}
3676
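/* Periodic housekeeping, re-armed every second at the bottom: run the
 * Lancer and UE error checks, re-issue the asynchronous stats command
 * once the previous one completes, read the die temperature every
 * be_get_temp_freq iterations, replenish starved RX queues and adapt
 * each EQ's interrupt delay.
 */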
3677static void be_worker(struct work_struct *work)
3678{
3679 struct be_adapter *adapter =
3680 container_of(work, struct be_adapter, work.work);
3681 struct be_rx_obj *rxo;
10ef9ab4 3682 struct be_eq_obj *eqo;
3683 int i;
3684
3685 if (lancer_chip(adapter))
3686 lancer_test_and_recover_fn_err(adapter);
3687
3688 be_detect_dump_ue(adapter);
3689
3690 /* when interrupts are not yet enabled, just reap any pending
3691 * mcc completions */
3692 if (!netif_running(adapter->netdev)) {
10ef9ab4 3693 be_process_mcc(adapter);
3694 goto reschedule;
3695 }
3696
3697 if (!adapter->stats_cmd_sent) {
3698 if (lancer_chip(adapter))
3699 lancer_cmd_get_pport_stats(adapter,
3700 &adapter->stats_cmd);
3701 else
3702 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3703 }
3704
3705 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3706 be_cmd_get_die_temperature(adapter);
3707
d8110f62 3708 for_all_rx_queues(adapter, rxo, i) {
3709 if (rxo->rx_post_starved) {
3710 rxo->rx_post_starved = false;
3711 be_post_rx_frags(rxo, GFP_KERNEL);
3712 }
3713 }
3714
3715 for_all_evt_queues(adapter, eqo, i)
3716 be_eqd_update(adapter, eqo);
3717
3718reschedule:
3719 adapter->work_counter++;
3720 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3721}
3722
3723static bool be_reset_required(struct be_adapter *adapter)
3724{
d79c0a20 3725	return be_find_vfs(adapter, ENABLED) <= 0;
3726}
3727
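/* Probe sequence: enable the PCI function, map BARs and set up mailbox
 * DMA memory (be_ctrl_init), sync with the firmware's ready state,
 * issue FW_INIT, reset the function if no VFs are already enabled, and
 * only then bring up queues and interrupts in be_setup() and register
 * the netdev.
 */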
3728static int __devinit be_probe(struct pci_dev *pdev,
3729 const struct pci_device_id *pdev_id)
3730{
3731 int status = 0;
3732 struct be_adapter *adapter;
3733 struct net_device *netdev;
3734
3735 status = pci_enable_device(pdev);
3736 if (status)
3737 goto do_none;
3738
3739 status = pci_request_regions(pdev, DRV_NAME);
3740 if (status)
3741 goto disable_dev;
3742 pci_set_master(pdev);
3743
7f640062 3744 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3745 if (netdev == NULL) {
3746 status = -ENOMEM;
3747 goto rel_reg;
3748 }
3749 adapter = netdev_priv(netdev);
3750 adapter->pdev = pdev;
3751 pci_set_drvdata(pdev, adapter);
fe6d2a38 3752
39f1d94d 3753 status = be_dev_type_check(adapter);
63657b9c 3754 if (status)
3755 goto free_netdev;
3756
6b7c5b94 3757 adapter->netdev = netdev;
2243e2e9 3758 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3759
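	/* Prefer a 64-bit DMA mask and advertise NETIF_F_HIGHDMA; fall
	 * back to 32-bit addressing if the platform cannot satisfy the
	 * wider mask.
	 */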
2b7bcebf 3760 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3761 if (!status) {
3762 netdev->features |= NETIF_F_HIGHDMA;
3763 } else {
2b7bcebf 3764 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3765 if (status) {
3766 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3767 goto free_netdev;
3768 }
3769 }
3770
3771 status = be_ctrl_init(adapter);
3772 if (status)
39f1d94d 3773 goto free_netdev;
6b7c5b94 3774
2243e2e9 3775 /* sync up with fw's ready state */
ba343c77 3776 if (be_physfn(adapter)) {
bf99e50d 3777 status = be_fw_wait_ready(adapter);
3778 if (status)
3779 goto ctrl_clean;
ba343c77 3780 }
6b7c5b94 3781
3782 /* tell fw we're ready to fire cmds */
3783 status = be_cmd_fw_init(adapter);
6b7c5b94 3784 if (status)
3785 goto ctrl_clean;
3786
3787 if (be_reset_required(adapter)) {
3788 status = be_cmd_reset_function(adapter);
3789 if (status)
3790 goto ctrl_clean;
3791 }
556ae191 3792
3793 /* The INTR bit may be set in the card when probed by a kdump kernel
3794 * after a crash.
3795 */
3796 if (!lancer_chip(adapter))
3797 be_intr_set(adapter, false);
3798
3799 status = be_stats_init(adapter);
3800 if (status)
3801 goto ctrl_clean;
3802
39f1d94d 3803 status = be_get_initial_config(adapter);
3804 if (status)
3805 goto stats_clean;
3806
3807 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3808 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3809
3810 status = be_setup(adapter);
3811 if (status)
3abcdeda 3812 goto msix_disable;
2243e2e9 3813
3abcdeda 3814 be_netdev_init(netdev);
3815 status = register_netdev(netdev);
3816 if (status != 0)
5fb379ee 3817 goto unsetup;
6b7c5b94 3818
3819 be_roce_dev_add(adapter);
3820
3821 dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3822 adapter->port_num);
34b1ef04 3823
3824 return 0;
3825
3826unsetup:
3827 be_clear(adapter);
3828msix_disable:
3829 be_msix_disable(adapter);
3830stats_clean:
3831 be_stats_cleanup(adapter);
3832ctrl_clean:
3833 be_ctrl_cleanup(adapter);
f9449ab7 3834free_netdev:
fe6d2a38 3835 free_netdev(netdev);
8d56ff11 3836 pci_set_drvdata(pdev, NULL);
3837rel_reg:
3838 pci_release_regions(pdev);
3839disable_dev:
3840 pci_disable_device(pdev);
3841do_none:
c4ca2374 3842 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3843 return status;
3844}
3845
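/* Legacy PM suspend: arm wake-on-LAN if enabled, detach and close the
 * interface, release adapter resources and enter the requested
 * low-power state; be_resume() below reverses these steps.
 */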
3846static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3847{
3848 struct be_adapter *adapter = pci_get_drvdata(pdev);
3849 struct net_device *netdev = adapter->netdev;
3850
3851 if (adapter->wol)
3852 be_setup_wol(adapter, true);
3853
3854 netif_device_detach(netdev);
3855 if (netif_running(netdev)) {
3856 rtnl_lock();
3857 be_close(netdev);
3858 rtnl_unlock();
3859 }
9b0365f1 3860 be_clear(adapter);
3861
3862 pci_save_state(pdev);
3863 pci_disable_device(pdev);
3864 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3865 return 0;
3866}
3867
3868static int be_resume(struct pci_dev *pdev)
3869{
3870 int status = 0;
3871 struct be_adapter *adapter = pci_get_drvdata(pdev);
3872 struct net_device *netdev = adapter->netdev;
3873
3874 netif_device_detach(netdev);
3875
3876 status = pci_enable_device(pdev);
3877 if (status)
3878 return status;
3879
 3880	pci_set_power_state(pdev, PCI_D0);
3881 pci_restore_state(pdev);
3882
3883 /* tell fw we're ready to fire cmds */
3884 status = be_cmd_fw_init(adapter);
3885 if (status)
3886 return status;
3887
9b0365f1 3888 be_setup(adapter);
3889 if (netif_running(netdev)) {
3890 rtnl_lock();
3891 be_open(netdev);
3892 rtnl_unlock();
3893 }
3894 netif_device_attach(netdev);
3895
3896 if (adapter->wol)
3897 be_setup_wol(adapter, false);
a4ca055f 3898
3899 return 0;
3900}
3901
3902/*
 3903 * A Function Level Reset (FLR) will stop BE from DMAing any data.
3904 */
3905static void be_shutdown(struct pci_dev *pdev)
3906{
3907 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3908
3909 if (!adapter)
3910 return;
82456b03 3911
0f4a6828 3912 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3913
2d5d4154 3914 netif_device_detach(adapter->netdev);
82456b03 3915
3916 if (adapter->wol)
3917 be_setup_wol(adapter, true);
3918
3919 be_cmd_reset_function(adapter);
3920
82456b03 3921 pci_disable_device(pdev);
3922}
3923
3924static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3925 pci_channel_state_t state)
3926{
3927 struct be_adapter *adapter = pci_get_drvdata(pdev);
3928 struct net_device *netdev = adapter->netdev;
3929
3930 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3931
3932 adapter->eeh_err = true;
3933
3934 netif_device_detach(netdev);
3935
3936 if (netif_running(netdev)) {
3937 rtnl_lock();
3938 be_close(netdev);
3939 rtnl_unlock();
3940 }
3941 be_clear(adapter);
3942
3943 if (state == pci_channel_io_perm_failure)
3944 return PCI_ERS_RESULT_DISCONNECT;
3945
3946 pci_disable_device(pdev);
3947
3948 /* The error could cause the FW to trigger a flash debug dump.
3949 * Resetting the card while flash dump is in progress
3950 * can cause it not to recover; wait for it to finish
3951 */
3952 ssleep(30);
3953 return PCI_ERS_RESULT_NEED_RESET;
3954}
3955
3956static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3957{
3958 struct be_adapter *adapter = pci_get_drvdata(pdev);
3959 int status;
3960
3961 dev_info(&adapter->pdev->dev, "EEH reset\n");
3962 adapter->eeh_err = false;
3963 adapter->ue_detected = false;
3964 adapter->fw_timeout = false;
3965
3966 status = pci_enable_device(pdev);
3967 if (status)
3968 return PCI_ERS_RESULT_DISCONNECT;
3969
3970 pci_set_master(pdev);
 3971	pci_set_power_state(pdev, PCI_D0);
3972 pci_restore_state(pdev);
3973
3974 /* Check if card is ok and fw is ready */
bf99e50d 3975 status = be_fw_wait_ready(adapter);
3976 if (status)
3977 return PCI_ERS_RESULT_DISCONNECT;
3978
3979 return PCI_ERS_RESULT_RECOVERED;
3980}
3981
3982static void be_eeh_resume(struct pci_dev *pdev)
3983{
3984 int status = 0;
3985 struct be_adapter *adapter = pci_get_drvdata(pdev);
3986 struct net_device *netdev = adapter->netdev;
3987
3988 dev_info(&adapter->pdev->dev, "EEH resume\n");
3989
3990 pci_save_state(pdev);
3991
3992 /* tell fw we're ready to fire cmds */
3993 status = be_cmd_fw_init(adapter);
3994 if (status)
3995 goto err;
3996
3997 status = be_cmd_reset_function(adapter);
3998 if (status)
3999 goto err;
4000
4001 status = be_setup(adapter);
4002 if (status)
4003 goto err;
4004
4005 if (netif_running(netdev)) {
4006 status = be_open(netdev);
4007 if (status)
4008 goto err;
4009 }
4010 netif_device_attach(netdev);
4011 return;
4012err:
4013 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4014}
4015
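/* EEH/AER flow: error_detected() quiesces the device and requests a
 * slot reset, slot_reset() re-enables the function and waits for the
 * firmware to become ready, and resume() re-initializes the firmware
 * and brings the interface back up.
 */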
4016static struct pci_error_handlers be_eeh_handlers = {
4017 .error_detected = be_eeh_err_detected,
4018 .slot_reset = be_eeh_reset,
4019 .resume = be_eeh_resume,
4020};
4021
4022static struct pci_driver be_driver = {
4023 .name = DRV_NAME,
4024 .id_table = be_dev_ids,
4025 .probe = be_probe,
4026 .remove = be_remove,
4027 .suspend = be_suspend,
cf588477 4028 .resume = be_resume,
82456b03 4029 .shutdown = be_shutdown,
cf588477 4030 .err_handler = &be_eeh_handlers
4031};
4032
4033static int __init be_init_module(void)
4034{
4035 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4036 rx_frag_size != 2048) {
4037 printk(KERN_WARNING DRV_NAME
4038 " : Module param rx_frag_size must be 2048/4096/8192."
4039 " Using 2048\n");
4040 rx_frag_size = 2048;
4041 }
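	/* Example (hypothetical invocation): loading the driver with
	 *	modprobe be2net rx_frag_size=4096
	 * passes this check; any other value falls back to 2048 as above.
	 */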
4042
4043 return pci_register_driver(&be_driver);
4044}
4045module_init(be_init_module);
4046
4047static void __exit be_exit_module(void)
4048{
4049 pci_unregister_driver(&be_driver);
4050}
4051module_exit(be_exit_module);