/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

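/* Enable or disable host interrupt delivery via the membar control
 * register. The current state is read first so the config write is
 * skipped when no change is needed or an EEH error is pending.
 */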
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

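/* Ring the RQ doorbell with the number of newly posted buffers; the wmb()
 * makes the descriptor writes visible to the device before the doorbell
 * write that hands them over.
 */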
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

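/* The new MAC filter is programmed before the old pmac-id is deleted, so
 * the interface is never left without a valid unicast filter.
 */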
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

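/* The populate_*_stats() routines below copy the version-specific stats
 * returned by FW into the common be_drv_stats layout.
 */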
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

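/* The HW counter is only 16 bits wide. A wrap is detected when the new
 * sample is smaller than the low word of the accumulator; 65536 is then
 * added to keep the running 32-bit total monotonic.
 */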
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

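/* Fill rtnl_link_stats64 from the per-queue SW counters (read under
 * u64_stats retry loops so 64-bit values are consistent on 32-bit hosts)
 * and from the FW-provided error counters in drv_stats.
 */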
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

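/* Map the skb head and frags for DMA and fill one WRB per mapped piece.
 * On a mapping failure, every WRB filled so far is unwound and 0 is
 * returned so the caller can drop the skb.
 */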
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

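/* Main transmit entry point: build the WRBs, stop the subqueue if another
 * max-fragmented skb might not fit, then ring the TX doorbell.
 */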
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (vlan_tx_tag_present(skb) &&
	    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

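/* Sync the HW RX filter with netdev state: promiscuous and all-multi
 * flags, plus the unicast and multicast address lists. Whenever a HW
 * filter table is exhausted, fall back to a more permissive mode.
 */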
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

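/* Walk the PCI bus and count this device's virtual functions; with
 * vf_state == ASSIGNED, count only the VFs currently assigned to guests.
 */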
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

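/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the measured RX packet rate and program it via be_cmd_modify_eqd()
 * if it differs from the current value.
 */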
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

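/* Return the page_info for the given frag index and DMA-unmap the backing
 * page if this frag is its last user.
 */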
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* parse the hash from the completion entry, not from rxcp itself */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* parse the hash from the completion entry, not from rxcp itself */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

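/* Pop the next RX completion, if any. The valid bit is checked before the
 * rmb() so the rest of the entry is read only after it is known to be
 * complete; the parsed fields are cached in rxo->rxcp.
 */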
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

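/* Unmap and free the skb indicated by one TX completion; returns the
 * number of WRBs reclaimed, including the header WRB.
 */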
3c8def97
SP
1539static u16 be_tx_compl_process(struct be_adapter *adapter,
1540 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1541{
3c8def97 1542 struct be_queue_info *txq = &txo->q;
a73b796e 1543 struct be_eth_wrb *wrb;
3c8def97 1544 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1545 struct sk_buff *sent_skb;
ec43b1a6
SP
1546 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1547 bool unmap_skb_hdr = true;
6b7c5b94 1548
ec43b1a6 1549 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1550 BUG_ON(!sent_skb);
ec43b1a6
SP
1551 sent_skbs[txq->tail] = NULL;
1552
1553 /* skip header wrb */
a73b796e 1554 queue_tail_inc(txq);
6b7c5b94 1555
ec43b1a6 1556 do {
6b7c5b94 1557 cur_index = txq->tail;
a73b796e 1558 wrb = queue_tail_node(txq);
2b7bcebf
IV
1559 unmap_tx_frag(&adapter->pdev->dev, wrb,
1560 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1561 unmap_skb_hdr = false;
1562
6b7c5b94
SP
1563 num_wrbs++;
1564 queue_tail_inc(txq);
ec43b1a6 1565 } while (cur_index != last_index);
6b7c5b94 1566
6b7c5b94 1567 kfree_skb(sent_skb);
4d586b82 1568 return num_wrbs;
6b7c5b94
SP
1569}
1570
10ef9ab4
SP
1571/* Return the number of events in the event queue */
1572static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1573{
10ef9ab4
SP
1574 struct be_eq_entry *eqe;
1575 int num = 0;
859b1e4e 1576
10ef9ab4
SP
1577 do {
1578 eqe = queue_tail_node(&eqo->q);
1579 if (eqe->evt == 0)
1580 break;
859b1e4e 1581
10ef9ab4
SP
1582 rmb();
1583 eqe->evt = 0;
1584 num++;
1585 queue_tail_inc(&eqo->q);
1586 } while (true);
1587
1588 return num;
859b1e4e
SP
1589}
1590
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

1609/* Leaves the EQ is disarmed state */
1610static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1611{
10ef9ab4 1612 int num = events_get(eqo);
859b1e4e 1613
10ef9ab4 1614 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1615}
1616
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

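/* Reclaims all posted TX buffers: waits up to 200ms for completions to
 * arrive, then force-frees any skbs whose completions never came.
 */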
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

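/* Creates a CQ for every TX queue; the CQs are spread round-robin over
 * the available EQs, so several TX queues may share one EQ.
 */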
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

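/* NAPI RX handler: consumes up to 'budget' completions, discarding
 * flush, partial-DMA and misdirected ones, and replenishes the RX ring
 * once it falls below the refill watermark.
 */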
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

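/* Reaps up to 'budget' TX completions of one TX queue, returns the
 * freed WRBs to the ring, and wakes the netdev subqueue if it had been
 * flow-stopped. Returns true when the CQ was drained within budget.
 */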
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

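/* Checks for an unrecoverable error (SLIPORT status on Lancer, the UE
 * status registers otherwise), latches the error state and logs every
 * error bit that is set.
 */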
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_want(adapter) && be_physfn(adapter) &&
	    !be_is_mc(adapter))
		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	else
		return 0;
}

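/* Sizes the MSI-X vector pool for the RSS rings (plus RoCE vectors on
 * adapters that support it) and enables MSI-X, retrying with whatever
 * smaller count the platform is willing to grant.
 */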
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

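/* netdev stop entry point: quiesces interrupts and NAPI, waits for
 * pending TX completions so all skbs are freed, and destroys the RX
 * queues.
 */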
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

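/* Creates the RX rings (the default queue first, as the FW expects),
 * programs the 128-entry RSS indirection table and posts the initial
 * receive buffers.
 */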
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

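/* Enables or disables Wake-on-LAN: programs the magic-packet filter in
 * the FW and sets the matching PCI D3hot/D3cold wake states.
 */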
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

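/* Enables SR-IOV (capped at what the device reports), creates an
 * interface per VF, assigns MAC addresses, and caches each VF's link
 * speed and default VLAN.
 */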
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

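/* Resolves the MAC address to use for an interface: reuses the current
 * address when a permanent one is already recorded, otherwise queries
 * the FW (mac-list on Lancer, permanent MAC for a BE3 PF, PF-assigned
 * MAC for a BE3 VF). *active_mac reports whether the address is
 * already programmed in the FW.
 */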
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac,
						       MAC_ADDRESS_TYPE_NETWORK,
						       false, if_handle,
						       *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, true,
					       0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

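/* Main HW initialization path: brings up EQs, CQs, MCC and TX/RX
 * queues, creates the interface and programs its MAC, then kicks off
 * SR-IOV (if requested) and the periodic worker.
 */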
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

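/* Walks the per-generation flash component table, skips components that
 * are absent from the UFI image (or not applicable), and writes each
 * remaining one to the flash ROM in 32KB chunks.
 */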
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

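/* Lancer firmware download: streams the image to the FW object "/prg"
 * in 32KB write_object chunks, then issues a zero-length write to
 * commit the image.
 */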
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

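/* Maps the CSR and doorbell BARs. The doorbell BAR number depends on
 * the chip generation and on PF/VF role; on Skyhawk part of it is set
 * aside for RoCE doorbells.
 */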
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

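/* Sets up everything needed to talk to the FW: maps the PCI BARs,
 * allocates the 16-byte-aligned mailbox and the rx_filter DMA buffers,
 * and initializes the mailbox/MCC locks.
 */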
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

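/* Size the stats command DMA buffer for the chip at hand: v0 of the
 * GET_STATS command on GEN2, the pport-stats command on Lancer, and v1
 * of GET_STATS everywhere else.
 */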
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

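/* Query the extended FAT capabilities and return the UART-mode debug
 * trace level configured for module 0; 0 is returned if the command or
 * the DMA allocation fails.
 */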
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < cfgs->module[0].num_modes; j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
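
/* One-time initial configuration: query the firmware config to size the
 * VLAN and pmac tables, fetch controller attributes, work out WOL
 * capability (falling back to the exclusion list if the ACPI WOL query
 * fails) and derive msg_enable from the firmware log level.
 */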
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

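/* Poll the SLIPORT status register for the ready bit, one second per
 * attempt, for up to SLIPORT_READY_TIMEOUT (30) attempts.
 */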
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

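/* If the port reports an error together with reset-needed, write
 * SLI_PORT_CONTROL_IP_MASK to SLIPORT_CONTROL to ask the firmware to
 * reset itself, then re-poll until the port comes back clean.
 */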
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

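/* Called from the worker on Lancer chips: if the SLIPORT status shows an
 * error, reset the port via lancer_test_and_set_rdy_state(), then tear
 * the interface down (be_close/be_clear) and bring it back up
 * (be_setup/be_open) while it is detached from the stack.
 */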
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

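/* Periodic (1 second) housekeeping: Lancer error recovery and UE
 * detection, firmware stats refresh, replenishing RX queues that ran out
 * of buffers (rx_post_starved), and adaptive EQ delay updates. While the
 * interface is down only pending MCC completions are reaped.
 */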
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

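/* PCI probe: bring up PCI resources, allocate the netdev, identify the
 * chip generation, establish mailbox communication with the firmware,
 * reset the function if no VFs are already enabled, and finally
 * configure and register the interface.
 */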
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

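/* EEH (PCI error recovery) callbacks: on error detection the interface
 * is detached and torn down; slot_reset re-enables the device and waits
 * for POST to complete; resume re-runs be_setup()/be_open() and
 * re-attaches the netdev.
 */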
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

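/* rx_frag_size is validated at module load; anything other than
 * 2048/4096/8192 is coerced back to the 2048 default. For example
 * (assuming the module is named after DRV_NAME, i.e. be2net):
 *
 *	modprobe be2net rx_frag_size=4096
 */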
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);