be2net: do not call be_set/get_fw_log_level() on Skyhawk-R
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26
27 MODULE_VERSION(DRV_VER);
28 MODULE_DEVICE_TABLE(pci, be_dev_ids);
29 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
30 MODULE_AUTHOR("Emulex Corporation");
31 MODULE_LICENSE("GPL");
32
33 static unsigned int num_vfs;
34 module_param(num_vfs, uint, S_IRUGO);
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static ushort rx_frag_size = 2048;
38 module_param(rx_frag_size, ushort, S_IRUGO);
39 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
41 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
42 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50 { 0 }
51 };
52 MODULE_DEVICE_TABLE(pci, be_dev_ids);
53 /* UE Status Low CSR */
54 static const char * const ue_status_low_desc[] = {
55 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
83 "AXGMAC0",
84 "AXGMAC1",
85 "JTAG",
86 "MPU_INTPEND"
87 };
88 /* UE Status High CSR */
89 static const char * const ue_status_hi_desc[] = {
90 "LPCMEMHOST",
91 "MGMT_MAC",
92 "PCS0ONLINE",
93 "MPU_IRAM",
94 "PCS1ONLINE",
95 "PCTL0",
96 "PCTL1",
97 "PMEM",
98 "RR",
99 "TXPB",
100 "RXPP",
101 "XAUI",
102 "TXP",
103 "ARM",
104 "IPC",
105 "HOST2",
106 "HOST3",
107 "HOST4",
108 "HOST5",
109 "HOST6",
110 "HOST7",
111 "HOST8",
112 "HOST9",
113 "NETC",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown",
120 "Unknown",
121 "Unknown"
122 };
123
124 /* Is BE in a multi-channel mode */
125 static inline bool be_is_mc(struct be_adapter *adapter) {
126 return (adapter->function_mode & FLEX10_MODE ||
127 adapter->function_mode & VNIC_MODE ||
128 adapter->function_mode & UMC_ENABLED);
129 }
130
131 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
132 {
133 struct be_dma_mem *mem = &q->dma_mem;
134 if (mem->va) {
135 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
136 mem->dma);
137 mem->va = NULL;
138 }
139 }
140
141 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
142 u16 len, u16 entry_size)
143 {
144 struct be_dma_mem *mem = &q->dma_mem;
145
146 memset(q, 0, sizeof(*q));
147 q->len = len;
148 q->entry_size = entry_size;
149 mem->size = len * entry_size;
150 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
151 GFP_KERNEL);
152 if (!mem->va)
153 return -ENOMEM;
154 return 0;
155 }
156
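/* Set the host interrupt enable state directly via the HOSTINTR bit in the
 * PCI config-space MEMBAR control register. Used by be_intr_set() as a
 * fallback when the be_cmd_intr_set() FW cmd fails.
 */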
157 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
158 {
159 u32 reg, enabled;
160
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165 if (!enabled && enable)
166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else if (enabled && !enable)
168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 else
170 return;
171
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
176 static void be_intr_set(struct be_adapter *adapter, bool enable)
177 {
178 int status = 0;
179
180 	/* On Lancer, interrupts can't be controlled via this register */
181 if (lancer_chip(adapter))
182 return;
183
184 if (adapter->eeh_error)
185 return;
186
187 status = be_cmd_intr_set(adapter, enable);
188 if (status)
189 be_reg_intr_set(adapter, enable);
190 }
191
192 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
193 {
194 u32 val = 0;
195 val |= qid & DB_RQ_RING_ID_MASK;
196 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
197
198 wmb();
199 iowrite32(val, adapter->db + DB_RQ_OFFSET);
200 }
201
202 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
203 u16 posted)
204 {
205 u32 val = 0;
206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
208
209 wmb();
210 iowrite32(val, adapter->db + txo->db_offset);
211 }
212
213 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
214 bool arm, bool clear_int, u16 num_popped)
215 {
216 u32 val = 0;
217 val |= qid & DB_EQ_RING_ID_MASK;
218 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
219 DB_EQ_RING_ID_EXT_MASK_SHIFT);
220
221 if (adapter->eeh_error)
222 return;
223
224 if (arm)
225 val |= 1 << DB_EQ_REARM_SHIFT;
226 if (clear_int)
227 val |= 1 << DB_EQ_CLR_SHIFT;
228 val |= 1 << DB_EQ_EVNT_SHIFT;
229 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
230 iowrite32(val, adapter->db + DB_EQ_OFFSET);
231 }
232
233 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
234 {
235 u32 val = 0;
236 val |= qid & DB_CQ_RING_ID_MASK;
237 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
238 DB_CQ_RING_ID_EXT_MASK_SHIFT);
239
240 if (adapter->eeh_error)
241 return;
242
243 if (arm)
244 val |= 1 << DB_CQ_REARM_SHIFT;
245 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
246 iowrite32(val, adapter->db + DB_CQ_OFFSET);
247 }
248
249 static int be_mac_addr_set(struct net_device *netdev, void *p)
250 {
251 struct be_adapter *adapter = netdev_priv(netdev);
252 struct device *dev = &adapter->pdev->dev;
253 struct sockaddr *addr = p;
254 int status;
255 u8 mac[ETH_ALEN];
256 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
257
258 if (!is_valid_ether_addr(addr->sa_data))
259 return -EADDRNOTAVAIL;
260
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
266 */
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
278 }
279
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
282 */
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
284 if (status)
285 goto err;
286
287 /* The MAC change did not happen, either due to lack of privilege
288 * or PF didn't pre-provision.
289 */
290 if (!ether_addr_equal(addr->sa_data, mac)) {
291 status = -EPERM;
292 goto err;
293 }
294
295 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
296 dev_info(dev, "MAC address changed to %pM\n", mac);
297 return 0;
298 err:
299 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
300 return status;
301 }
302
303 /* BE2 supports only the v0 cmd; BE3 uses v1 and later chips use v2 */
304 static void *hw_stats_from_cmd(struct be_adapter *adapter)
305 {
306 if (BE2_chip(adapter)) {
307 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
308
309 return &cmd->hw_stats;
310 } else if (BE3_chip(adapter)) {
311 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
312
313 return &cmd->hw_stats;
314 } else {
315 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
318 }
319 }
320
321 /* BE2 supports only the v0 cmd; BE3 uses v1 and later chips use v2 */
322 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
323 {
324 if (BE2_chip(adapter)) {
325 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
326
327 return &hw_stats->erx;
328 } else if (BE3_chip(adapter)) {
329 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
330
331 return &hw_stats->erx;
332 } else {
333 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
336 }
337 }
338
339 static void populate_be_v0_stats(struct be_adapter *adapter)
340 {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
343 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
344 struct be_port_rxf_stats_v0 *port_stats =
345 &rxf_stats->port[adapter->port_num];
346 struct be_drv_stats *drvs = &adapter->drv_stats;
347
348 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
349 drvs->rx_pause_frames = port_stats->rx_pause_frames;
350 drvs->rx_crc_errors = port_stats->rx_crc_errors;
351 drvs->rx_control_frames = port_stats->rx_control_frames;
352 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
353 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
354 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
355 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
356 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
357 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
358 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
359 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
360 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
361 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
362 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
363 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
364 drvs->rx_dropped_header_too_small =
365 port_stats->rx_dropped_header_too_small;
366 drvs->rx_address_filtered =
367 port_stats->rx_address_filtered +
368 port_stats->rx_vlan_filtered;
369 drvs->rx_alignment_symbol_errors =
370 port_stats->rx_alignment_symbol_errors;
371
372 drvs->tx_pauseframes = port_stats->tx_pauseframes;
373 drvs->tx_controlframes = port_stats->tx_controlframes;
374
375 if (adapter->port_num)
376 drvs->jabber_events = rxf_stats->port1_jabber_events;
377 else
378 drvs->jabber_events = rxf_stats->port0_jabber_events;
379 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
380 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
381 drvs->forwarded_packets = rxf_stats->forwarded_packets;
382 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
383 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
384 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
385 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
386 }
387
388 static void populate_be_v1_stats(struct be_adapter *adapter)
389 {
390 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
391 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
392 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
393 struct be_port_rxf_stats_v1 *port_stats =
394 &rxf_stats->port[adapter->port_num];
395 struct be_drv_stats *drvs = &adapter->drv_stats;
396
397 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
398 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
399 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
400 drvs->rx_pause_frames = port_stats->rx_pause_frames;
401 drvs->rx_crc_errors = port_stats->rx_crc_errors;
402 drvs->rx_control_frames = port_stats->rx_control_frames;
403 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
404 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
405 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
406 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
407 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
408 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
409 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
410 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
411 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
412 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
413 drvs->rx_dropped_header_too_small =
414 port_stats->rx_dropped_header_too_small;
415 drvs->rx_input_fifo_overflow_drop =
416 port_stats->rx_input_fifo_overflow_drop;
417 drvs->rx_address_filtered = port_stats->rx_address_filtered;
418 drvs->rx_alignment_symbol_errors =
419 port_stats->rx_alignment_symbol_errors;
420 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
421 drvs->tx_pauseframes = port_stats->tx_pauseframes;
422 drvs->tx_controlframes = port_stats->tx_controlframes;
423 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
424 drvs->jabber_events = port_stats->jabber_events;
425 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
426 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
427 drvs->forwarded_packets = rxf_stats->forwarded_packets;
428 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
429 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
430 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
431 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
432 }
433
434 static void populate_be_v2_stats(struct be_adapter *adapter)
435 {
436 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
437 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
438 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
439 struct be_port_rxf_stats_v2 *port_stats =
440 &rxf_stats->port[adapter->port_num];
441 struct be_drv_stats *drvs = &adapter->drv_stats;
442
443 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
444 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
445 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
446 drvs->rx_pause_frames = port_stats->rx_pause_frames;
447 drvs->rx_crc_errors = port_stats->rx_crc_errors;
448 drvs->rx_control_frames = port_stats->rx_control_frames;
449 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
450 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
451 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
452 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
453 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
454 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
455 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
456 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
457 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
458 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
459 drvs->rx_dropped_header_too_small =
460 port_stats->rx_dropped_header_too_small;
461 drvs->rx_input_fifo_overflow_drop =
462 port_stats->rx_input_fifo_overflow_drop;
463 drvs->rx_address_filtered = port_stats->rx_address_filtered;
464 drvs->rx_alignment_symbol_errors =
465 port_stats->rx_alignment_symbol_errors;
466 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
467 drvs->tx_pauseframes = port_stats->tx_pauseframes;
468 drvs->tx_controlframes = port_stats->tx_controlframes;
469 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
470 drvs->jabber_events = port_stats->jabber_events;
471 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
472 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
473 drvs->forwarded_packets = rxf_stats->forwarded_packets;
474 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
475 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
476 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
477 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
478 if (be_roce_supported(adapter)) {
479 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
480 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
481 drvs->rx_roce_frames = port_stats->roce_frames_received;
482 drvs->roce_drops_crc = port_stats->roce_drops_crc;
483 drvs->roce_drops_payload_len =
484 port_stats->roce_drops_payload_len;
485 }
486 }
487
488 static void populate_lancer_stats(struct be_adapter *adapter)
489 {
490
491 struct be_drv_stats *drvs = &adapter->drv_stats;
492 struct lancer_pport_stats *pport_stats =
493 pport_stats_from_cmd(adapter);
494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
520 drvs->jabber_events = pport_stats->rx_jabbers;
521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
523 drvs->rx_drops_too_many_frags =
524 pport_stats->rx_drops_too_many_frags_lo;
525 }
526
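/* Accumulate a 16-bit HW counter into a 32-bit driver counter: the low 16
 * bits mirror the latest HW value and the high 16 bits count wraparounds.
 * For example, if the stored low half is 0xFFF0 and the new HW value is
 * 0x0005, the counter has wrapped and 65536 is added to the accumulator.
 */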
527 static void accumulate_16bit_val(u32 *acc, u16 val)
528 {
529 #define lo(x) (x & 0xFFFF)
530 #define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537 }
538
539 static void populate_erx_stats(struct be_adapter *adapter,
540 struct be_rx_obj *rxo,
541 u32 erx_stat)
542 {
543 if (!BEx_chip(adapter))
544 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
545 else
546 		/* The erx HW counter below can wrap around after 65535; the
547 		 * driver accumulates it into a 32-bit value.
548 		 */
549 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
550 (u16)erx_stat);
551 }
552
553 void be_parse_stats(struct be_adapter *adapter)
554 {
555 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
556 struct be_rx_obj *rxo;
557 int i;
558 u32 erx_stat;
559
560 if (lancer_chip(adapter)) {
561 populate_lancer_stats(adapter);
562 } else {
563 if (BE2_chip(adapter))
564 populate_be_v0_stats(adapter);
565 else if (BE3_chip(adapter))
566 /* for BE3 */
567 populate_be_v1_stats(adapter);
568 else
569 populate_be_v2_stats(adapter);
570
571 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
572 for_all_rx_queues(adapter, rxo, i) {
573 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
574 populate_erx_stats(adapter, rxo, erx_stat);
575 }
576 }
577 }
578
579 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
580 struct rtnl_link_stats64 *stats)
581 {
582 struct be_adapter *adapter = netdev_priv(netdev);
583 struct be_drv_stats *drvs = &adapter->drv_stats;
584 struct be_rx_obj *rxo;
585 struct be_tx_obj *txo;
586 u64 pkts, bytes;
587 unsigned int start;
588 int i;
589
590 for_all_rx_queues(adapter, rxo, i) {
591 const struct be_rx_stats *rx_stats = rx_stats(rxo);
592 do {
593 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
594 pkts = rx_stats(rxo)->rx_pkts;
595 bytes = rx_stats(rxo)->rx_bytes;
596 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
597 stats->rx_packets += pkts;
598 stats->rx_bytes += bytes;
599 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
600 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
601 rx_stats(rxo)->rx_drops_no_frags;
602 }
603
604 for_all_tx_queues(adapter, txo, i) {
605 const struct be_tx_stats *tx_stats = tx_stats(txo);
606 do {
607 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
608 pkts = tx_stats(txo)->tx_pkts;
609 bytes = tx_stats(txo)->tx_bytes;
610 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
611 stats->tx_packets += pkts;
612 stats->tx_bytes += bytes;
613 }
614
615 /* bad pkts received */
616 stats->rx_errors = drvs->rx_crc_errors +
617 drvs->rx_alignment_symbol_errors +
618 drvs->rx_in_range_errors +
619 drvs->rx_out_range_errors +
620 drvs->rx_frame_too_long +
621 drvs->rx_dropped_too_small +
622 drvs->rx_dropped_too_short +
623 drvs->rx_dropped_header_too_small +
624 drvs->rx_dropped_tcp_length +
625 drvs->rx_dropped_runt;
626
627 /* detailed rx errors */
628 stats->rx_length_errors = drvs->rx_in_range_errors +
629 drvs->rx_out_range_errors +
630 drvs->rx_frame_too_long;
631
632 stats->rx_crc_errors = drvs->rx_crc_errors;
633
634 /* frame alignment errors */
635 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
636
637 /* receiver fifo overrun */
638 	/* drops_no_pbuf is not per-interface; it's per BE card */
639 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
640 drvs->rx_input_fifo_overflow_drop +
641 drvs->rx_drops_no_pbuf;
642 return stats;
643 }
644
645 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
646 {
647 struct net_device *netdev = adapter->netdev;
648
649 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
650 netif_carrier_off(netdev);
651 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
652 }
653
654 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655 netif_carrier_on(netdev);
656 else
657 netif_carrier_off(netdev);
658 }
659
660 static void be_tx_stats_update(struct be_tx_obj *txo,
661 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
662 {
663 struct be_tx_stats *stats = tx_stats(txo);
664
665 u64_stats_update_begin(&stats->sync);
666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
670 if (stopped)
671 stats->tx_stops++;
672 u64_stats_update_end(&stats->sync);
673 }
674
675 /* Determine number of WRB entries needed to xmit data in an skb */
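/* One WRB is needed for the linear part (if any), one per page fragment,
 * plus one for the header WRB. On BE2/BE3 a dummy WRB is added when
 * required to keep the total even; Lancer has no such restriction.
 */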
676 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677 bool *dummy)
678 {
679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
683 /* to account for hdr wrb */
684 cnt++;
685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
691 }
692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694 }
695
696 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697 {
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
701 wrb->rsvd0 = 0;
702 }
703
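/* Return the VLAN tag to use for a TX WRB: the skb's tag, with the 802.1p
 * priority replaced by the adapter's recommended priority when the
 * OS-supplied priority is not in the allowed priority bitmap.
 */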
704 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705 struct sk_buff *skb)
706 {
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718 }
719
720 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
721 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
722 {
723 u16 vlan_tag;
724
725 memset(hdr, 0, sizeof(*hdr));
726
727 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
728
729 if (skb_is_gso(skb)) {
730 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
732 hdr, skb_shinfo(skb)->gso_size);
733 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
734 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
735 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
736 if (is_tcp_pkt(skb))
737 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
738 else if (is_udp_pkt(skb))
739 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
740 }
741
742 if (vlan_tx_tag_present(skb)) {
743 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
744 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
746 }
747
748 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
749 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
753 }
754
755 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
756 bool unmap_single)
757 {
758 dma_addr_t dma;
759
760 be_dws_le_to_cpu(wrb, sizeof(*wrb));
761
762 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
763 if (wrb->frag_len) {
764 if (unmap_single)
765 dma_unmap_single(dev, dma, wrb->frag_len,
766 DMA_TO_DEVICE);
767 else
768 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
769 }
770 }
771
772 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
773 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
774 bool skip_hw_vlan)
775 {
776 dma_addr_t busaddr;
777 int i, copied = 0;
778 struct device *dev = &adapter->pdev->dev;
779 struct sk_buff *first_skb = skb;
780 struct be_eth_wrb *wrb;
781 struct be_eth_hdr_wrb *hdr;
782 bool map_single = false;
783 u16 map_head;
784
785 hdr = queue_head_node(txq);
786 queue_head_inc(txq);
787 map_head = txq->head;
788
789 if (skb->len > skb->data_len) {
790 int len = skb_headlen(skb);
791 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
792 if (dma_mapping_error(dev, busaddr))
793 goto dma_err;
794 map_single = true;
795 wrb = queue_head_node(txq);
796 wrb_fill(wrb, busaddr, len);
797 be_dws_cpu_to_le(wrb, sizeof(*wrb));
798 queue_head_inc(txq);
799 copied += len;
800 }
801
802 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
803 const struct skb_frag_struct *frag =
804 &skb_shinfo(skb)->frags[i];
805 busaddr = skb_frag_dma_map(dev, frag, 0,
806 skb_frag_size(frag), DMA_TO_DEVICE);
807 if (dma_mapping_error(dev, busaddr))
808 goto dma_err;
809 wrb = queue_head_node(txq);
810 wrb_fill(wrb, busaddr, skb_frag_size(frag));
811 be_dws_cpu_to_le(wrb, sizeof(*wrb));
812 queue_head_inc(txq);
813 copied += skb_frag_size(frag);
814 }
815
816 if (dummy_wrb) {
817 wrb = queue_head_node(txq);
818 wrb_fill(wrb, 0, 0);
819 be_dws_cpu_to_le(wrb, sizeof(*wrb));
820 queue_head_inc(txq);
821 }
822
823 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
824 be_dws_cpu_to_le(hdr, sizeof(*hdr));
825
826 return copied;
827 dma_err:
828 txq->head = map_head;
829 while (copied) {
830 wrb = queue_head_node(txq);
831 unmap_tx_frag(dev, wrb, map_single);
832 map_single = false;
833 copied -= wrb->frag_len;
834 queue_head_inc(txq);
835 }
836 return 0;
837 }
838
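/* Insert the VLAN tag(s) directly into the packet data instead of relying
 * on HW tagging. The inner tag comes from the skb (or the port PVID in QnQ
 * mode) and an outer qnq_vid tag is added when configured; in the QnQ cases
 * *skip_hw_vlan is set so the HW does not insert yet another tag.
 */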
839 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
840 struct sk_buff *skb,
841 bool *skip_hw_vlan)
842 {
843 u16 vlan_tag = 0;
844
845 skb = skb_share_check(skb, GFP_ATOMIC);
846 if (unlikely(!skb))
847 return skb;
848
849 if (vlan_tx_tag_present(skb))
850 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
851
852 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
853 if (!vlan_tag)
854 vlan_tag = adapter->pvid;
855 		/* f/w workaround: setting skip_hw_vlan = 1 informs the f/w to
856 		 * skip VLAN insertion
857 		 */
858 if (skip_hw_vlan)
859 *skip_hw_vlan = true;
860 }
861
862 if (vlan_tag) {
863 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
864 if (unlikely(!skb))
865 return skb;
866 skb->vlan_tci = 0;
867 }
868
869 /* Insert the outer VLAN, if any */
870 if (adapter->qnq_vid) {
871 vlan_tag = adapter->qnq_vid;
872 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
873 if (unlikely(!skb))
874 return skb;
875 if (skip_hw_vlan)
876 *skip_hw_vlan = true;
877 }
878
879 return skb;
880 }
881
882 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
883 {
884 struct ethhdr *eh = (struct ethhdr *)skb->data;
885 u16 offset = ETH_HLEN;
886
887 if (eh->h_proto == htons(ETH_P_IPV6)) {
888 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
889
890 offset += sizeof(struct ipv6hdr);
891 if (ip6h->nexthdr != NEXTHDR_TCP &&
892 ip6h->nexthdr != NEXTHDR_UDP) {
893 struct ipv6_opt_hdr *ehdr =
894 (struct ipv6_opt_hdr *) (skb->data + offset);
895
896 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
897 if (ehdr->hdrlen == 0xff)
898 return true;
899 }
900 }
901 return false;
902 }
903
904 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
905 {
906 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
907 }
908
909 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
910 struct sk_buff *skb)
911 {
912 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
913 }
914
915 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
916 struct sk_buff *skb,
917 bool *skip_hw_vlan)
918 {
919 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
920 unsigned int eth_hdr_len;
921 struct iphdr *ip;
922
923 	/* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes or less
924 * may cause a transmit stall on that port. So the work-around is to
925 * pad short packets (<= 32 bytes) to a 36-byte length.
926 */
927 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
928 if (skb_padto(skb, 36))
929 goto tx_drop;
930 skb->len = 36;
931 }
932
933 /* For padded packets, BE HW modifies tot_len field in IP header
934 	 * incorrectly when a VLAN tag is inserted by HW.
935 * For padded packets, Lancer computes incorrect checksum.
936 */
937 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
938 VLAN_ETH_HLEN : ETH_HLEN;
939 if (skb->len <= 60 &&
940 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
941 is_ipv4_pkt(skb)) {
942 ip = (struct iphdr *)ip_hdr(skb);
943 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
944 }
945
946 /* If vlan tag is already inlined in the packet, skip HW VLAN
947 * tagging in UMC mode
948 */
949 if ((adapter->function_mode & UMC_ENABLED) &&
950 veh->h_vlan_proto == htons(ETH_P_8021Q))
951 *skip_hw_vlan = true;
952
953 /* HW has a bug wherein it will calculate CSUM for VLAN
954 * pkts even though it is disabled.
955 * Manually insert VLAN in pkt.
956 */
957 if (skb->ip_summed != CHECKSUM_PARTIAL &&
958 vlan_tx_tag_present(skb)) {
959 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
960 if (unlikely(!skb))
961 goto tx_drop;
962 }
963
964 /* HW may lockup when VLAN HW tagging is requested on
965 * certain ipv6 packets. Drop such pkts if the HW workaround to
966 * skip HW tagging is not enabled by FW.
967 */
968 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
969 (adapter->pvid || adapter->qnq_vid) &&
970 !qnq_async_evt_rcvd(adapter)))
971 goto tx_drop;
972
973 /* Manual VLAN tag insertion to prevent:
974 * ASIC lockup when the ASIC inserts VLAN tag into
975 * certain ipv6 packets. Insert VLAN tags in driver,
976 * and set event, completion, vlan bits accordingly
977 * in the Tx WRB.
978 */
979 if (be_ipv6_tx_stall_chk(adapter, skb) &&
980 be_vlan_tag_tx_chk(adapter, skb)) {
981 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
982 if (unlikely(!skb))
983 goto tx_drop;
984 }
985
986 return skb;
987 tx_drop:
988 dev_kfree_skb_any(skb);
989 return NULL;
990 }
991
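/* Main transmit handler: apply the HW workarounds, build the WRBs for the
 * skb, stop the subqueue if it cannot accommodate another maximum-sized
 * request, and then ring the TX doorbell.
 */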
992 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
993 {
994 struct be_adapter *adapter = netdev_priv(netdev);
995 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
996 struct be_queue_info *txq = &txo->q;
997 bool dummy_wrb, stopped = false;
998 u32 wrb_cnt = 0, copied = 0;
999 bool skip_hw_vlan = false;
1000 u32 start = txq->head;
1001
1002 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1003 if (!skb) {
1004 tx_stats(txo)->tx_drv_drops++;
1005 return NETDEV_TX_OK;
1006 }
1007
1008 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1009
1010 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1011 skip_hw_vlan);
1012 if (copied) {
1013 int gso_segs = skb_shinfo(skb)->gso_segs;
1014
1015 /* record the sent skb in the sent_skb table */
1016 BUG_ON(txo->sent_skb_list[start]);
1017 txo->sent_skb_list[start] = skb;
1018
1019 /* Ensure txq has space for the next skb; Else stop the queue
1020 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
1021 * tx compls of the current transmit which'll wake up the queue
1022 */
1023 atomic_add(wrb_cnt, &txq->used);
1024 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1025 txq->len) {
1026 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1027 stopped = true;
1028 }
1029
1030 be_txq_notify(adapter, txo, wrb_cnt);
1031
1032 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1033 } else {
1034 txq->head = start;
1035 tx_stats(txo)->tx_drv_drops++;
1036 dev_kfree_skb_any(skb);
1037 }
1038 return NETDEV_TX_OK;
1039 }
1040
1041 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1042 {
1043 struct be_adapter *adapter = netdev_priv(netdev);
1044 if (new_mtu < BE_MIN_MTU ||
1045 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1046 (ETH_HLEN + ETH_FCS_LEN))) {
1047 dev_info(&adapter->pdev->dev,
1048 "MTU must be between %d and %d bytes\n",
1049 BE_MIN_MTU,
1050 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1051 return -EINVAL;
1052 }
1053 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1054 netdev->mtu, new_mtu);
1055 netdev->mtu = new_mtu;
1056 return 0;
1057 }
1058
1059 /*
1060 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1061 * If the user configures more, place BE in vlan promiscuous mode.
1062 */
1063 static int be_vid_config(struct be_adapter *adapter)
1064 {
1065 u16 vids[BE_NUM_VLANS_SUPPORTED];
1066 u16 num = 0, i;
1067 int status = 0;
1068
1069 /* No need to further configure vids if in promiscuous mode */
1070 if (adapter->promiscuous)
1071 return 0;
1072
1073 if (adapter->vlans_added > be_max_vlans(adapter))
1074 goto set_vlan_promisc;
1075
1076 /* Construct VLAN Table to give to HW */
1077 for (i = 0; i < VLAN_N_VID; i++)
1078 if (adapter->vlan_tag[i])
1079 vids[num++] = cpu_to_le16(i);
1080
1081 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1082 vids, num, 0);
1083
1084 if (status) {
1085 /* Set to VLAN promisc mode as setting VLAN filter failed */
1086 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1087 goto set_vlan_promisc;
1088 dev_err(&adapter->pdev->dev,
1089 "Setting HW VLAN filtering failed.\n");
1090 } else {
1091 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1092 /* hw VLAN filtering re-enabled. */
1093 status = be_cmd_rx_filter(adapter,
1094 BE_FLAGS_VLAN_PROMISC, OFF);
1095 if (!status) {
1096 dev_info(&adapter->pdev->dev,
1097 "Disabling VLAN Promiscuous mode.\n");
1098 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1099 dev_info(&adapter->pdev->dev,
1100 "Re-Enabling HW VLAN filtering\n");
1101 }
1102 }
1103 }
1104
1105 return status;
1106
1107 set_vlan_promisc:
1108 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1109
1110 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1111 if (!status) {
1112 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1113 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1114 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1115 } else
1116 dev_err(&adapter->pdev->dev,
1117 "Failed to enable VLAN Promiscuous mode.\n");
1118 return status;
1119 }
1120
1121 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1122 {
1123 struct be_adapter *adapter = netdev_priv(netdev);
1124 int status = 0;
1125
1126
1127 /* Packets with VID 0 are always received by Lancer by default */
1128 if (lancer_chip(adapter) && vid == 0)
1129 goto ret;
1130
1131 adapter->vlan_tag[vid] = 1;
1132 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1133 status = be_vid_config(adapter);
1134
1135 if (!status)
1136 adapter->vlans_added++;
1137 else
1138 adapter->vlan_tag[vid] = 0;
1139 ret:
1140 return status;
1141 }
1142
1143 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1144 {
1145 struct be_adapter *adapter = netdev_priv(netdev);
1146 int status = 0;
1147
1148 /* Packets with VID 0 are always received by Lancer by default */
1149 if (lancer_chip(adapter) && vid == 0)
1150 goto ret;
1151
1152 adapter->vlan_tag[vid] = 0;
1153 if (adapter->vlans_added <= be_max_vlans(adapter))
1154 status = be_vid_config(adapter);
1155
1156 if (!status)
1157 adapter->vlans_added--;
1158 else
1159 adapter->vlan_tag[vid] = 1;
1160 ret:
1161 return status;
1162 }
1163
1164 static void be_set_rx_mode(struct net_device *netdev)
1165 {
1166 struct be_adapter *adapter = netdev_priv(netdev);
1167 int status;
1168
1169 if (netdev->flags & IFF_PROMISC) {
1170 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1171 adapter->promiscuous = true;
1172 goto done;
1173 }
1174
1175 /* BE was previously in promiscuous mode; disable it */
1176 if (adapter->promiscuous) {
1177 adapter->promiscuous = false;
1178 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1179
1180 if (adapter->vlans_added)
1181 be_vid_config(adapter);
1182 }
1183
1184 /* Enable multicast promisc if num configured exceeds what we support */
1185 if (netdev->flags & IFF_ALLMULTI ||
1186 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1187 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1188 goto done;
1189 }
1190
1191 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1192 struct netdev_hw_addr *ha;
1193 int i = 1; /* First slot is claimed by the Primary MAC */
1194
1195 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1196 be_cmd_pmac_del(adapter, adapter->if_handle,
1197 adapter->pmac_id[i], 0);
1198 }
1199
1200 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1201 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1202 adapter->promiscuous = true;
1203 goto done;
1204 }
1205
1206 netdev_for_each_uc_addr(ha, adapter->netdev) {
1207 adapter->uc_macs++; /* First slot is for Primary MAC */
1208 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1209 adapter->if_handle,
1210 &adapter->pmac_id[adapter->uc_macs], 0);
1211 }
1212 }
1213
1214 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1215
1216 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1217 if (status) {
1218 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1219 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1220 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1221 }
1222 done:
1223 return;
1224 }
1225
1226 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1227 {
1228 struct be_adapter *adapter = netdev_priv(netdev);
1229 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1230 int status;
1231
1232 if (!sriov_enabled(adapter))
1233 return -EPERM;
1234
1235 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1236 return -EINVAL;
1237
1238 if (BEx_chip(adapter)) {
1239 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1240 vf + 1);
1241
1242 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1243 &vf_cfg->pmac_id, vf + 1);
1244 } else {
1245 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1246 vf + 1);
1247 }
1248
1249 if (status)
1250 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1251 mac, vf);
1252 else
1253 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1254
1255 return status;
1256 }
1257
1258 static int be_get_vf_config(struct net_device *netdev, int vf,
1259 struct ifla_vf_info *vi)
1260 {
1261 struct be_adapter *adapter = netdev_priv(netdev);
1262 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1263
1264 if (!sriov_enabled(adapter))
1265 return -EPERM;
1266
1267 if (vf >= adapter->num_vfs)
1268 return -EINVAL;
1269
1270 vi->vf = vf;
1271 vi->tx_rate = vf_cfg->tx_rate;
1272 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1273 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1274 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1275
1276 return 0;
1277 }
1278
1279 static int be_set_vf_vlan(struct net_device *netdev,
1280 int vf, u16 vlan, u8 qos)
1281 {
1282 struct be_adapter *adapter = netdev_priv(netdev);
1283 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1284 int status = 0;
1285
1286 if (!sriov_enabled(adapter))
1287 return -EPERM;
1288
1289 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1290 return -EINVAL;
1291
1292 if (vlan || qos) {
1293 vlan |= qos << VLAN_PRIO_SHIFT;
1294 if (vf_cfg->vlan_tag != vlan) {
1295 /* If this is new value, program it. Else skip. */
1296 vf_cfg->vlan_tag = vlan;
1297 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1298 vf_cfg->if_handle, 0);
1299 }
1300 } else {
1301 /* Reset Transparent Vlan Tagging. */
1302 vf_cfg->vlan_tag = 0;
1303 vlan = vf_cfg->def_vid;
1304 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1305 vf_cfg->if_handle, 0);
1306 }
1307
1308
1309 if (status)
1310 dev_info(&adapter->pdev->dev,
1311 "VLAN %d config on VF %d failed\n", vlan, vf);
1312 return status;
1313 }
1314
1315 static int be_set_vf_tx_rate(struct net_device *netdev,
1316 int vf, int rate)
1317 {
1318 struct be_adapter *adapter = netdev_priv(netdev);
1319 int status = 0;
1320
1321 if (!sriov_enabled(adapter))
1322 return -EPERM;
1323
1324 if (vf >= adapter->num_vfs)
1325 return -EINVAL;
1326
1327 if (rate < 100 || rate > 10000) {
1328 dev_err(&adapter->pdev->dev,
1329 "tx rate must be between 100 and 10000 Mbps\n");
1330 return -EINVAL;
1331 }
1332
1333 if (lancer_chip(adapter))
1334 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1335 else
1336 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1337
1338 if (status)
1339 dev_err(&adapter->pdev->dev,
1340 "tx rate %d on VF %d failed\n", rate, vf);
1341 else
1342 adapter->vf_cfg[vf].tx_rate = rate;
1343 return status;
1344 }
1345
1346 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1347 ulong now)
1348 {
1349 aic->rx_pkts_prev = rx_pkts;
1350 aic->tx_reqs_prev = tx_pkts;
1351 aic->jiffies = now;
1352 }
1353
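/* Adaptive interrupt coalescing: for each event queue, compute the combined
 * rx+tx packet rate since the last sample and derive a new EQ delay,
 * eqd = (pps / 15000) * 4, clamped to [min_eqd, max_eqd]; values below 8
 * are treated as 0 (no delay). Changed delays are pushed to the FW via
 * be_cmd_modify_eqd().
 */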
1354 static void be_eqd_update(struct be_adapter *adapter)
1355 {
1356 struct be_set_eqd set_eqd[MAX_EVT_QS];
1357 int eqd, i, num = 0, start;
1358 struct be_aic_obj *aic;
1359 struct be_eq_obj *eqo;
1360 struct be_rx_obj *rxo;
1361 struct be_tx_obj *txo;
1362 u64 rx_pkts, tx_pkts;
1363 ulong now;
1364 u32 pps, delta;
1365
1366 for_all_evt_queues(adapter, eqo, i) {
1367 aic = &adapter->aic_obj[eqo->idx];
1368 if (!aic->enable) {
1369 if (aic->jiffies)
1370 aic->jiffies = 0;
1371 eqd = aic->et_eqd;
1372 goto modify_eqd;
1373 }
1374
1375 rxo = &adapter->rx_obj[eqo->idx];
1376 do {
1377 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1378 rx_pkts = rxo->stats.rx_pkts;
1379 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1380
1381 txo = &adapter->tx_obj[eqo->idx];
1382 do {
1383 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1384 tx_pkts = txo->stats.tx_reqs;
1385 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1386
1387
1388 /* Skip, if wrapped around or first calculation */
1389 now = jiffies;
1390 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1391 rx_pkts < aic->rx_pkts_prev ||
1392 tx_pkts < aic->tx_reqs_prev) {
1393 be_aic_update(aic, rx_pkts, tx_pkts, now);
1394 continue;
1395 }
1396
1397 delta = jiffies_to_msecs(now - aic->jiffies);
1398 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1399 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1400 eqd = (pps / 15000) << 2;
1401
1402 if (eqd < 8)
1403 eqd = 0;
1404 eqd = min_t(u32, eqd, aic->max_eqd);
1405 eqd = max_t(u32, eqd, aic->min_eqd);
1406
1407 be_aic_update(aic, rx_pkts, tx_pkts, now);
1408 modify_eqd:
1409 if (eqd != aic->prev_eqd) {
1410 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1411 set_eqd[num].eq_id = eqo->q.id;
1412 aic->prev_eqd = eqd;
1413 num++;
1414 }
1415 }
1416
1417 if (num)
1418 be_cmd_modify_eqd(adapter, set_eqd, num);
1419 }
1420
1421 static void be_rx_stats_update(struct be_rx_obj *rxo,
1422 struct be_rx_compl_info *rxcp)
1423 {
1424 struct be_rx_stats *stats = rx_stats(rxo);
1425
1426 u64_stats_update_begin(&stats->sync);
1427 stats->rx_compl++;
1428 stats->rx_bytes += rxcp->pkt_size;
1429 stats->rx_pkts++;
1430 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1431 stats->rx_mcast_pkts++;
1432 if (rxcp->err)
1433 stats->rx_compl_err++;
1434 u64_stats_update_end(&stats->sync);
1435 }
1436
1437 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1438 {
1439 	/* L4 checksum is not reliable for non-TCP/UDP packets.
1440 	 * Also ignore ipcksm for IPv6 pkts */
1441 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442 (rxcp->ip_csum || rxcp->ipv6);
1443 }
1444
1445 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1446 u16 frag_idx)
1447 {
1448 struct be_adapter *adapter = rxo->adapter;
1449 struct be_rx_page_info *rx_page_info;
1450 struct be_queue_info *rxq = &rxo->q;
1451
1452 rx_page_info = &rxo->page_info_tbl[frag_idx];
1453 BUG_ON(!rx_page_info->page);
1454
1455 if (rx_page_info->last_page_user) {
1456 dma_unmap_page(&adapter->pdev->dev,
1457 dma_unmap_addr(rx_page_info, bus),
1458 adapter->big_page_size, DMA_FROM_DEVICE);
1459 rx_page_info->last_page_user = false;
1460 }
1461
1462 atomic_dec(&rxq->used);
1463 return rx_page_info;
1464 }
1465
1466 /* Throw away the data in the Rx completion */
1467 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468 struct be_rx_compl_info *rxcp)
1469 {
1470 struct be_queue_info *rxq = &rxo->q;
1471 struct be_rx_page_info *page_info;
1472 u16 i, num_rcvd = rxcp->num_rcvd;
1473
1474 for (i = 0; i < num_rcvd; i++) {
1475 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1476 put_page(page_info->page);
1477 memset(page_info, 0, sizeof(*page_info));
1478 index_inc(&rxcp->rxq_idx, rxq->len);
1479 }
1480 }
1481
1482 /*
1483 * skb_fill_rx_data forms a complete skb for an ether frame
1484 * indicated by rxcp.
1485 */
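/* If the first fragment is no larger than BE_HDR_LEN it is copied entirely
 * into the skb's linear area; otherwise only the Ethernet header is copied
 * and the RX page fragments are attached to the skb, coalescing fragments
 * that share a physical page.
 */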
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487 struct be_rx_compl_info *rxcp)
1488 {
1489 struct be_queue_info *rxq = &rxo->q;
1490 struct be_rx_page_info *page_info;
1491 u16 i, j;
1492 u16 hdr_len, curr_frag_len, remaining;
1493 u8 *start;
1494
1495 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1496 start = page_address(page_info->page) + page_info->page_offset;
1497 prefetch(start);
1498
1499 /* Copy data in the first descriptor of this completion */
1500 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1501
1502 skb->len = curr_frag_len;
1503 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1504 memcpy(skb->data, start, curr_frag_len);
1505 /* Complete packet has now been moved to data */
1506 put_page(page_info->page);
1507 skb->data_len = 0;
1508 skb->tail += curr_frag_len;
1509 } else {
1510 hdr_len = ETH_HLEN;
1511 memcpy(skb->data, start, hdr_len);
1512 skb_shinfo(skb)->nr_frags = 1;
1513 skb_frag_set_page(skb, 0, page_info->page);
1514 skb_shinfo(skb)->frags[0].page_offset =
1515 page_info->page_offset + hdr_len;
1516 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1517 skb->data_len = curr_frag_len - hdr_len;
1518 skb->truesize += rx_frag_size;
1519 skb->tail += hdr_len;
1520 }
1521 page_info->page = NULL;
1522
1523 if (rxcp->pkt_size <= rx_frag_size) {
1524 BUG_ON(rxcp->num_rcvd != 1);
1525 return;
1526 }
1527
1528 /* More frags present for this completion */
1529 index_inc(&rxcp->rxq_idx, rxq->len);
1530 remaining = rxcp->pkt_size - curr_frag_len;
1531 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1532 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1533 curr_frag_len = min(remaining, rx_frag_size);
1534
1535 /* Coalesce all frags from the same physical page in one slot */
1536 if (page_info->page_offset == 0) {
1537 /* Fresh page */
1538 j++;
1539 skb_frag_set_page(skb, j, page_info->page);
1540 skb_shinfo(skb)->frags[j].page_offset =
1541 page_info->page_offset;
1542 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1543 skb_shinfo(skb)->nr_frags++;
1544 } else {
1545 put_page(page_info->page);
1546 }
1547
1548 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1549 skb->len += curr_frag_len;
1550 skb->data_len += curr_frag_len;
1551 skb->truesize += rx_frag_size;
1552 remaining -= curr_frag_len;
1553 index_inc(&rxcp->rxq_idx, rxq->len);
1554 page_info->page = NULL;
1555 }
1556 BUG_ON(j > MAX_SKB_FRAGS);
1557 }
1558
1559 /* Process the RX completion indicated by rxcp when GRO is disabled */
1560 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1561 struct be_rx_compl_info *rxcp)
1562 {
1563 struct be_adapter *adapter = rxo->adapter;
1564 struct net_device *netdev = adapter->netdev;
1565 struct sk_buff *skb;
1566
1567 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1568 if (unlikely(!skb)) {
1569 rx_stats(rxo)->rx_drops_no_skbs++;
1570 be_rx_compl_discard(rxo, rxcp);
1571 return;
1572 }
1573
1574 skb_fill_rx_data(rxo, skb, rxcp);
1575
1576 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1577 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578 else
1579 skb_checksum_none_assert(skb);
1580
1581 skb->protocol = eth_type_trans(skb, netdev);
1582 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1583 if (netdev->features & NETIF_F_RXHASH)
1584 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1585 skb_mark_napi_id(skb, napi);
1586
1587 if (rxcp->vlanf)
1588 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1589
1590 netif_receive_skb(skb);
1591 }
1592
1593 /* Process the RX completion indicated by rxcp when GRO is enabled */
1594 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1595 struct napi_struct *napi,
1596 struct be_rx_compl_info *rxcp)
1597 {
1598 struct be_adapter *adapter = rxo->adapter;
1599 struct be_rx_page_info *page_info;
1600 struct sk_buff *skb = NULL;
1601 struct be_queue_info *rxq = &rxo->q;
1602 u16 remaining, curr_frag_len;
1603 u16 i, j;
1604
1605 skb = napi_get_frags(napi);
1606 if (!skb) {
1607 be_rx_compl_discard(rxo, rxcp);
1608 return;
1609 }
1610
1611 remaining = rxcp->pkt_size;
1612 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1613 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1614
1615 curr_frag_len = min(remaining, rx_frag_size);
1616
1617 /* Coalesce all frags from the same physical page in one slot */
1618 if (i == 0 || page_info->page_offset == 0) {
1619 /* First frag or Fresh page */
1620 j++;
1621 skb_frag_set_page(skb, j, page_info->page);
1622 skb_shinfo(skb)->frags[j].page_offset =
1623 page_info->page_offset;
1624 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1625 } else {
1626 put_page(page_info->page);
1627 }
1628 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1629 skb->truesize += rx_frag_size;
1630 remaining -= curr_frag_len;
1631 index_inc(&rxcp->rxq_idx, rxq->len);
1632 memset(page_info, 0, sizeof(*page_info));
1633 }
1634 BUG_ON(j > MAX_SKB_FRAGS);
1635
1636 skb_shinfo(skb)->nr_frags = j + 1;
1637 skb->len = rxcp->pkt_size;
1638 skb->data_len = rxcp->pkt_size;
1639 skb->ip_summed = CHECKSUM_UNNECESSARY;
1640 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1641 if (adapter->netdev->features & NETIF_F_RXHASH)
1642 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1643 skb_mark_napi_id(skb, napi);
1644
1645 if (rxcp->vlanf)
1646 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1647
1648 napi_gro_frags(napi);
1649 }
1650
1651 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1652 struct be_rx_compl_info *rxcp)
1653 {
1654 rxcp->pkt_size =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1656 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1657 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1658 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1659 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1660 rxcp->ip_csum =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1662 rxcp->l4_csum =
1663 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1664 rxcp->ipv6 =
1665 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1666 rxcp->rxq_idx =
1667 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1668 rxcp->num_rcvd =
1669 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1670 rxcp->pkt_type =
1671 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1672 rxcp->rss_hash =
1673 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1674 if (rxcp->vlanf) {
1675 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1676 compl);
1677 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1678 compl);
1679 }
1680 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1681 }
1682
1683 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1684 struct be_rx_compl_info *rxcp)
1685 {
1686 rxcp->pkt_size =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1688 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1689 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1690 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1691 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1692 rxcp->ip_csum =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1694 rxcp->l4_csum =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1696 rxcp->ipv6 =
1697 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1698 rxcp->rxq_idx =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1700 rxcp->num_rcvd =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1702 rxcp->pkt_type =
1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1704 rxcp->rss_hash =
1705 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1706 if (rxcp->vlanf) {
1707 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1708 compl);
1709 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1710 compl);
1711 }
1712 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1713 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1714 ip_frag, compl);
1715 }
1716
1717 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1718 {
1719 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1720 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1721 struct be_adapter *adapter = rxo->adapter;
1722
1723 /* For checking the valid bit it is Ok to use either definition as the
1724 * valid bit is at the same position in both v0 and v1 Rx compl */
1725 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1726 return NULL;
1727
1728 rmb();
1729 be_dws_le_to_cpu(compl, sizeof(*compl));
1730
1731 if (adapter->be3_native)
1732 be_parse_rx_compl_v1(compl, rxcp);
1733 else
1734 be_parse_rx_compl_v0(compl, rxcp);
1735
1736 if (rxcp->ip_frag)
1737 rxcp->l4_csum = 0;
1738
1739 if (rxcp->vlanf) {
1740 /* vlanf could be wrongly set in some cards.
1741 * ignore if vtm is not set */
1742 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1743 rxcp->vlanf = 0;
1744
1745 if (!lancer_chip(adapter))
1746 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1747
1748 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1749 !adapter->vlan_tag[rxcp->vlan_tag])
1750 rxcp->vlanf = 0;
1751 }
1752
1753 	/* As the compl has been parsed, reset it; we won't touch it again */
1754 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1755
1756 queue_tail_inc(&rxo->cq);
1757 return rxcp;
1758 }
1759
1760 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1761 {
1762 u32 order = get_order(size);
1763
1764 if (order > 0)
1765 gfp |= __GFP_COMP;
1766 return alloc_pages(gfp, order);
1767 }
1768
1769 /*
1770 * Allocate a page, split it to fragments of size rx_frag_size and post as
1771 * receive buffers to BE
1772 */
1773 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1774 {
1775 struct be_adapter *adapter = rxo->adapter;
1776 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1777 struct be_queue_info *rxq = &rxo->q;
1778 struct page *pagep = NULL;
1779 struct be_eth_rx_d *rxd;
1780 u64 page_dmaaddr = 0, frag_dmaaddr;
1781 u32 posted, page_offset = 0;
1782
1783 page_info = &rxo->page_info_tbl[rxq->head];
1784 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1785 if (!pagep) {
1786 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1787 if (unlikely(!pagep)) {
1788 rx_stats(rxo)->rx_post_fail++;
1789 break;
1790 }
1791 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1792 0, adapter->big_page_size,
1793 DMA_FROM_DEVICE);
1794 page_info->page_offset = 0;
1795 } else {
1796 get_page(pagep);
1797 page_info->page_offset = page_offset + rx_frag_size;
1798 }
1799 page_offset = page_info->page_offset;
1800 page_info->page = pagep;
1801 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1802 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1803
1804 rxd = queue_head_node(rxq);
1805 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1806 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1807
1808 /* Any space left in the current big page for another frag? */
1809 if ((page_offset + rx_frag_size + rx_frag_size) >
1810 adapter->big_page_size) {
1811 pagep = NULL;
1812 page_info->last_page_user = true;
1813 }
1814
1815 prev_page_info = page_info;
1816 queue_head_inc(rxq);
1817 page_info = &rxo->page_info_tbl[rxq->head];
1818 }
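	/* the big page was not fully carved into frags; mark the last posted frag as its final user */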
1819 if (pagep)
1820 prev_page_info->last_page_user = true;
1821
1822 if (posted) {
1823 atomic_add(posted, &rxq->used);
1824 if (rxo->rx_post_starved)
1825 rxo->rx_post_starved = false;
1826 be_rxq_notify(adapter, rxq->id, posted);
1827 } else if (atomic_read(&rxq->used) == 0) {
1828 /* Let be_worker replenish when memory is available */
1829 rxo->rx_post_starved = true;
1830 }
1831 }
1832
1833 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1834 {
1835 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1836
1837 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1838 return NULL;
1839
1840 rmb();
1841 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1842
1843 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1844
1845 queue_tail_inc(tx_cq);
1846 return txcp;
1847 }
1848
1849 static u16 be_tx_compl_process(struct be_adapter *adapter,
1850 struct be_tx_obj *txo, u16 last_index)
1851 {
1852 struct be_queue_info *txq = &txo->q;
1853 struct be_eth_wrb *wrb;
1854 struct sk_buff **sent_skbs = txo->sent_skb_list;
1855 struct sk_buff *sent_skb;
1856 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1857 bool unmap_skb_hdr = true;
1858
1859 sent_skb = sent_skbs[txq->tail];
1860 BUG_ON(!sent_skb);
1861 sent_skbs[txq->tail] = NULL;
1862
1863 /* skip header wrb */
1864 queue_tail_inc(txq);
1865
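	/* unmap and count the data wrbs up to and including last_index */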
1866 do {
1867 cur_index = txq->tail;
1868 wrb = queue_tail_node(txq);
1869 unmap_tx_frag(&adapter->pdev->dev, wrb,
1870 (unmap_skb_hdr && skb_headlen(sent_skb)));
1871 unmap_skb_hdr = false;
1872
1873 num_wrbs++;
1874 queue_tail_inc(txq);
1875 } while (cur_index != last_index);
1876
1877 kfree_skb(sent_skb);
1878 return num_wrbs;
1879 }
1880
1881 /* Return the number of events in the event queue */
1882 static inline int events_get(struct be_eq_obj *eqo)
1883 {
1884 struct be_eq_entry *eqe;
1885 int num = 0;
1886
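	/* consume valid events (evt != 0), zeroing each entry as it is counted */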
1887 do {
1888 eqe = queue_tail_node(&eqo->q);
1889 if (eqe->evt == 0)
1890 break;
1891
1892 rmb();
1893 eqe->evt = 0;
1894 num++;
1895 queue_tail_inc(&eqo->q);
1896 } while (true);
1897
1898 return num;
1899 }
1900
1901 /* Leaves the EQ in a disarmed state */
1902 static void be_eq_clean(struct be_eq_obj *eqo)
1903 {
1904 int num = events_get(eqo);
1905
1906 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1907 }
1908
1909 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1910 {
1911 struct be_rx_page_info *page_info;
1912 struct be_queue_info *rxq = &rxo->q;
1913 struct be_queue_info *rx_cq = &rxo->cq;
1914 struct be_rx_compl_info *rxcp;
1915 struct be_adapter *adapter = rxo->adapter;
1916 int flush_wait = 0;
1917 u16 tail;
1918
1919 /* Consume pending rx completions.
1920 * Wait for the flush completion (identified by zero num_rcvd)
1921 * to arrive. Notify CQ even when there are no more CQ entries
1922 * for HW to flush partially coalesced CQ entries.
1923 * In Lancer, there is no need to wait for flush compl.
1924 */
1925 for (;;) {
1926 rxcp = be_rx_compl_get(rxo);
1927 if (rxcp == NULL) {
1928 if (lancer_chip(adapter))
1929 break;
1930
1931 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1932 dev_warn(&adapter->pdev->dev,
1933 "did not receive flush compl\n");
1934 break;
1935 }
1936 be_cq_notify(adapter, rx_cq->id, true, 0);
1937 mdelay(1);
1938 } else {
1939 be_rx_compl_discard(rxo, rxcp);
1940 be_cq_notify(adapter, rx_cq->id, false, 1);
1941 if (rxcp->num_rcvd == 0)
1942 break;
1943 }
1944 }
1945
1946 /* After cleanup, leave the CQ in unarmed state */
1947 be_cq_notify(adapter, rx_cq->id, false, 0);
1948
1949 /* Then free posted rx buffers that were not used */
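	/* the oldest un-freed buffer sits 'used' entries behind the producer (head) index */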
1950 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1951 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1952 page_info = get_rx_page_info(rxo, tail);
1953 put_page(page_info->page);
1954 memset(page_info, 0, sizeof(*page_info));
1955 }
1956 BUG_ON(atomic_read(&rxq->used));
1957 rxq->tail = rxq->head = 0;
1958 }
1959
1960 static void be_tx_compl_clean(struct be_adapter *adapter)
1961 {
1962 struct be_tx_obj *txo;
1963 struct be_queue_info *txq;
1964 struct be_eth_tx_compl *txcp;
1965 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1966 struct sk_buff *sent_skb;
1967 bool dummy_wrb;
1968 int i, pending_txqs;
1969
1970 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1971 do {
1972 pending_txqs = adapter->num_tx_qs;
1973
1974 for_all_tx_queues(adapter, txo, i) {
1975 txq = &txo->q;
1976 while ((txcp = be_tx_compl_get(&txo->cq))) {
1977 end_idx =
1978 AMAP_GET_BITS(struct amap_eth_tx_compl,
1979 wrb_index, txcp);
1980 num_wrbs += be_tx_compl_process(adapter, txo,
1981 end_idx);
1982 cmpl++;
1983 }
1984 if (cmpl) {
1985 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1986 atomic_sub(num_wrbs, &txq->used);
1987 cmpl = 0;
1988 num_wrbs = 0;
1989 }
1990 if (atomic_read(&txq->used) == 0)
1991 pending_txqs--;
1992 }
1993
1994 if (pending_txqs == 0 || ++timeo > 200)
1995 break;
1996
1997 mdelay(1);
1998 } while (true);
1999
2000 for_all_tx_queues(adapter, txo, i) {
2001 txq = &txo->q;
2002 if (atomic_read(&txq->used))
2003 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2004 atomic_read(&txq->used));
2005
2006 /* free posted tx for which compls will never arrive */
2007 while (atomic_read(&txq->used)) {
2008 sent_skb = txo->sent_skb_list[txq->tail];
2009 end_idx = txq->tail;
2010 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2011 &dummy_wrb);
2012 index_adv(&end_idx, num_wrbs - 1, txq->len);
2013 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2014 atomic_sub(num_wrbs, &txq->used);
2015 }
2016 }
2017 }
2018
2019 static void be_evt_queues_destroy(struct be_adapter *adapter)
2020 {
2021 struct be_eq_obj *eqo;
2022 int i;
2023
2024 for_all_evt_queues(adapter, eqo, i) {
2025 if (eqo->q.created) {
2026 be_eq_clean(eqo);
2027 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2028 napi_hash_del(&eqo->napi);
2029 netif_napi_del(&eqo->napi);
2030 }
2031 be_queue_free(adapter, &eqo->q);
2032 }
2033 }
2034
2035 static int be_evt_queues_create(struct be_adapter *adapter)
2036 {
2037 struct be_queue_info *eq;
2038 struct be_eq_obj *eqo;
2039 struct be_aic_obj *aic;
2040 int i, rc;
2041
2042 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2043 adapter->cfg_num_qs);
2044
2045 for_all_evt_queues(adapter, eqo, i) {
2046 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2047 BE_NAPI_WEIGHT);
2048 napi_hash_add(&eqo->napi);
2049 aic = &adapter->aic_obj[i];
2050 eqo->adapter = adapter;
2051 eqo->tx_budget = BE_TX_BUDGET;
2052 eqo->idx = i;
2053 aic->max_eqd = BE_MAX_EQD;
2054 aic->enable = true;
2055
2056 eq = &eqo->q;
2057 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2058 sizeof(struct be_eq_entry));
2059 if (rc)
2060 return rc;
2061
2062 rc = be_cmd_eq_create(adapter, eqo);
2063 if (rc)
2064 return rc;
2065 }
2066 return 0;
2067 }
2068
2069 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2070 {
2071 struct be_queue_info *q;
2072
2073 q = &adapter->mcc_obj.q;
2074 if (q->created)
2075 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2076 be_queue_free(adapter, q);
2077
2078 q = &adapter->mcc_obj.cq;
2079 if (q->created)
2080 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2081 be_queue_free(adapter, q);
2082 }
2083
2084 /* Must be called only after TX qs are created as MCC shares TX EQ */
2085 static int be_mcc_queues_create(struct be_adapter *adapter)
2086 {
2087 struct be_queue_info *q, *cq;
2088
2089 cq = &adapter->mcc_obj.cq;
2090 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2091 sizeof(struct be_mcc_compl)))
2092 goto err;
2093
2094 /* Use the default EQ for MCC completions */
2095 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2096 goto mcc_cq_free;
2097
2098 q = &adapter->mcc_obj.q;
2099 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2100 goto mcc_cq_destroy;
2101
2102 if (be_cmd_mccq_create(adapter, q, cq))
2103 goto mcc_q_free;
2104
2105 return 0;
2106
2107 mcc_q_free:
2108 be_queue_free(adapter, q);
2109 mcc_cq_destroy:
2110 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2111 mcc_cq_free:
2112 be_queue_free(adapter, cq);
2113 err:
2114 return -1;
2115 }
2116
2117 static void be_tx_queues_destroy(struct be_adapter *adapter)
2118 {
2119 struct be_queue_info *q;
2120 struct be_tx_obj *txo;
2121 u8 i;
2122
2123 for_all_tx_queues(adapter, txo, i) {
2124 q = &txo->q;
2125 if (q->created)
2126 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2127 be_queue_free(adapter, q);
2128
2129 q = &txo->cq;
2130 if (q->created)
2131 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2132 be_queue_free(adapter, q);
2133 }
2134 }
2135
2136 static int be_tx_qs_create(struct be_adapter *adapter)
2137 {
2138 struct be_queue_info *cq, *eq;
2139 struct be_tx_obj *txo;
2140 int status, i;
2141
2142 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2143
2144 for_all_tx_queues(adapter, txo, i) {
2145 cq = &txo->cq;
2146 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2147 sizeof(struct be_eth_tx_compl));
2148 if (status)
2149 return status;
2150
2151 u64_stats_init(&txo->stats.sync);
2152 u64_stats_init(&txo->stats.sync_compl);
2153
2154 /* If num_evt_qs is less than num_tx_qs, then more than
2155 		 * one txq shares an eq
2156 */
2157 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2158 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2159 if (status)
2160 return status;
2161
2162 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2163 sizeof(struct be_eth_wrb));
2164 if (status)
2165 return status;
2166
2167 status = be_cmd_txq_create(adapter, txo);
2168 if (status)
2169 return status;
2170 }
2171
2172 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2173 adapter->num_tx_qs);
2174 return 0;
2175 }
2176
2177 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2178 {
2179 struct be_queue_info *q;
2180 struct be_rx_obj *rxo;
2181 int i;
2182
2183 for_all_rx_queues(adapter, rxo, i) {
2184 q = &rxo->cq;
2185 if (q->created)
2186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2187 be_queue_free(adapter, q);
2188 }
2189 }
2190
2191 static int be_rx_cqs_create(struct be_adapter *adapter)
2192 {
2193 struct be_queue_info *eq, *cq;
2194 struct be_rx_obj *rxo;
2195 int rc, i;
2196
2197 /* We can create as many RSS rings as there are EQs. */
2198 adapter->num_rx_qs = adapter->num_evt_qs;
2199
2200 	/* We'll use RSS only if at least 2 RSS rings are supported.
2201 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2202 */
2203 if (adapter->num_rx_qs > 1)
2204 adapter->num_rx_qs++;
2205
2206 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2207 for_all_rx_queues(adapter, rxo, i) {
2208 rxo->adapter = adapter;
2209 cq = &rxo->cq;
2210 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2211 sizeof(struct be_eth_rx_compl));
2212 if (rc)
2213 return rc;
2214
2215 u64_stats_init(&rxo->stats.sync);
2216 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2217 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2218 if (rc)
2219 return rc;
2220 }
2221
2222 dev_info(&adapter->pdev->dev,
2223 "created %d RSS queue(s) and 1 default RX queue\n",
2224 adapter->num_rx_qs - 1);
2225 return 0;
2226 }
2227
2228 static irqreturn_t be_intx(int irq, void *dev)
2229 {
2230 struct be_eq_obj *eqo = dev;
2231 struct be_adapter *adapter = eqo->adapter;
2232 int num_evts = 0;
2233
2234 /* IRQ is not expected when NAPI is scheduled as the EQ
2235 * will not be armed.
2236 * But, this can happen on Lancer INTx where it takes
2237 	 * a while to de-assert INTx or in BE2 where occasionally
2238 * an interrupt may be raised even when EQ is unarmed.
2239 * If NAPI is already scheduled, then counting & notifying
2240 * events will orphan them.
2241 */
2242 if (napi_schedule_prep(&eqo->napi)) {
2243 num_evts = events_get(eqo);
2244 __napi_schedule(&eqo->napi);
2245 if (num_evts)
2246 eqo->spurious_intr = 0;
2247 }
2248 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2249
2250 	/* Return IRQ_HANDLED only for the first spurious intr
2251 * after a valid intr to stop the kernel from branding
2252 * this irq as a bad one!
2253 */
2254 if (num_evts || eqo->spurious_intr++ == 0)
2255 return IRQ_HANDLED;
2256 else
2257 return IRQ_NONE;
2258 }
2259
2260 static irqreturn_t be_msix(int irq, void *dev)
2261 {
2262 struct be_eq_obj *eqo = dev;
2263
2264 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2265 napi_schedule(&eqo->napi);
2266 return IRQ_HANDLED;
2267 }
2268
2269 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2270 {
2271 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2272 }
2273
2274 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2275 int budget, int polling)
2276 {
2277 struct be_adapter *adapter = rxo->adapter;
2278 struct be_queue_info *rx_cq = &rxo->cq;
2279 struct be_rx_compl_info *rxcp;
2280 u32 work_done;
2281
2282 for (work_done = 0; work_done < budget; work_done++) {
2283 rxcp = be_rx_compl_get(rxo);
2284 if (!rxcp)
2285 break;
2286
2287 /* Is it a flush compl that has no data */
2288 if (unlikely(rxcp->num_rcvd == 0))
2289 goto loop_continue;
2290
2291 /* Discard compl with partial DMA Lancer B0 */
2292 if (unlikely(!rxcp->pkt_size)) {
2293 be_rx_compl_discard(rxo, rxcp);
2294 goto loop_continue;
2295 }
2296
2297 /* On BE drop pkts that arrive due to imperfect filtering in
2298 		 * promiscuous mode on some SKUs
2299 */
2300 if (unlikely(rxcp->port != adapter->port_num &&
2301 !lancer_chip(adapter))) {
2302 be_rx_compl_discard(rxo, rxcp);
2303 goto loop_continue;
2304 }
2305
2306 /* Don't do gro when we're busy_polling */
2307 if (do_gro(rxcp) && polling != BUSY_POLLING)
2308 be_rx_compl_process_gro(rxo, napi, rxcp);
2309 else
2310 be_rx_compl_process(rxo, napi, rxcp);
2311
2312 loop_continue:
2313 be_rx_stats_update(rxo, rxcp);
2314 }
2315
2316 if (work_done) {
2317 be_cq_notify(adapter, rx_cq->id, true, work_done);
2318
2319 /* When an rx-obj gets into post_starved state, just
2320 * let be_worker do the posting.
2321 */
2322 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2323 !rxo->rx_post_starved)
2324 be_post_rx_frags(rxo, GFP_ATOMIC);
2325 }
2326
2327 return work_done;
2328 }
2329
2330 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2331 int budget, int idx)
2332 {
2333 struct be_eth_tx_compl *txcp;
2334 int num_wrbs = 0, work_done;
2335
2336 for (work_done = 0; work_done < budget; work_done++) {
2337 txcp = be_tx_compl_get(&txo->cq);
2338 if (!txcp)
2339 break;
2340 num_wrbs += be_tx_compl_process(adapter, txo,
2341 AMAP_GET_BITS(struct amap_eth_tx_compl,
2342 wrb_index, txcp));
2343 }
2344
2345 if (work_done) {
2346 be_cq_notify(adapter, txo->cq.id, true, work_done);
2347 atomic_sub(num_wrbs, &txo->q.used);
2348
2349 /* As Tx wrbs have been freed up, wake up netdev queue
2350 * if it was stopped due to lack of tx wrbs. */
2351 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2352 atomic_read(&txo->q.used) < txo->q.len / 2) {
2353 netif_wake_subqueue(adapter->netdev, idx);
2354 }
2355
2356 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2357 tx_stats(txo)->tx_compl += work_done;
2358 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2359 }
2360 return (work_done < budget); /* Done */
2361 }
2362
2363 int be_poll(struct napi_struct *napi, int budget)
2364 {
2365 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2366 struct be_adapter *adapter = eqo->adapter;
2367 int max_work = 0, work, i, num_evts;
2368 struct be_rx_obj *rxo;
2369 bool tx_done;
2370
2371 num_evts = events_get(eqo);
2372
2373 /* Process all TXQs serviced by this EQ */
2374 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2375 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2376 eqo->tx_budget, i);
2377 if (!tx_done)
2378 max_work = budget;
2379 }
2380
2381 if (be_lock_napi(eqo)) {
2382 /* This loop will iterate twice for EQ0 in which
2383 		 * completions of the last RXQ (default one) are also processed.
2384 * For other EQs the loop iterates only once
2385 */
2386 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2387 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2388 max_work = max(work, max_work);
2389 }
2390 be_unlock_napi(eqo);
2391 } else {
2392 max_work = budget;
2393 }
2394
2395 if (is_mcc_eqo(eqo))
2396 be_process_mcc(adapter);
2397
2398 if (max_work < budget) {
2399 napi_complete(napi);
2400 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2401 } else {
2402 /* As we'll continue in polling mode, count and clear events */
2403 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2404 }
2405 return max_work;
2406 }
2407
2408 #ifdef CONFIG_NET_RX_BUSY_POLL
2409 static int be_busy_poll(struct napi_struct *napi)
2410 {
2411 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2412 struct be_adapter *adapter = eqo->adapter;
2413 struct be_rx_obj *rxo;
2414 int i, work = 0;
2415
2416 if (!be_lock_busy_poll(eqo))
2417 return LL_FLUSH_BUSY;
2418
2419 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2420 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2421 if (work)
2422 break;
2423 }
2424
2425 be_unlock_busy_poll(eqo);
2426 return work;
2427 }
2428 #endif
2429
2430 void be_detect_error(struct be_adapter *adapter)
2431 {
2432 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2433 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2434 u32 i;
2435
2436 if (be_hw_error(adapter))
2437 return;
2438
2439 if (lancer_chip(adapter)) {
2440 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2441 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2442 sliport_err1 = ioread32(adapter->db +
2443 SLIPORT_ERROR1_OFFSET);
2444 sliport_err2 = ioread32(adapter->db +
2445 SLIPORT_ERROR2_OFFSET);
2446 }
2447 } else {
2448 pci_read_config_dword(adapter->pdev,
2449 PCICFG_UE_STATUS_LOW, &ue_lo);
2450 pci_read_config_dword(adapter->pdev,
2451 PCICFG_UE_STATUS_HIGH, &ue_hi);
2452 pci_read_config_dword(adapter->pdev,
2453 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2454 pci_read_config_dword(adapter->pdev,
2455 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2456
2457 ue_lo = (ue_lo & ~ue_lo_mask);
2458 ue_hi = (ue_hi & ~ue_hi_mask);
2459 }
2460
2461 	/* On certain platforms BE hardware can indicate spurious UEs.
2462 	 * In case of a real UE the h/w is allowed to stop working completely
2463 	 * on its own; hence hw_error is not set on UE detection.
2464 */
2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466 adapter->hw_error = true;
2467 		/* Do not log error messages if it's a FW reset */
2468 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2469 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2470 dev_info(&adapter->pdev->dev,
2471 "Firmware update in progress\n");
2472 return;
2473 } else {
2474 dev_err(&adapter->pdev->dev,
2475 "Error detected in the card\n");
2476 }
2477 }
2478
2479 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2480 dev_err(&adapter->pdev->dev,
2481 "ERR: sliport status 0x%x\n", sliport_status);
2482 dev_err(&adapter->pdev->dev,
2483 "ERR: sliport error1 0x%x\n", sliport_err1);
2484 dev_err(&adapter->pdev->dev,
2485 "ERR: sliport error2 0x%x\n", sliport_err2);
2486 }
2487
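	/* walk the unmasked UE bits and name the block that flagged each error */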
2488 if (ue_lo) {
2489 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2490 if (ue_lo & 1)
2491 dev_err(&adapter->pdev->dev,
2492 "UE: %s bit set\n", ue_status_low_desc[i]);
2493 }
2494 }
2495
2496 if (ue_hi) {
2497 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2498 if (ue_hi & 1)
2499 dev_err(&adapter->pdev->dev,
2500 "UE: %s bit set\n", ue_status_hi_desc[i]);
2501 }
2502 }
2503
2504 }
2505
2506 static void be_msix_disable(struct be_adapter *adapter)
2507 {
2508 if (msix_enabled(adapter)) {
2509 pci_disable_msix(adapter->pdev);
2510 adapter->num_msix_vec = 0;
2511 adapter->num_msix_roce_vec = 0;
2512 }
2513 }
2514
2515 static int be_msix_enable(struct be_adapter *adapter)
2516 {
2517 int i, status, num_vec;
2518 struct device *dev = &adapter->pdev->dev;
2519
2520 /* If RoCE is supported, program the max number of NIC vectors that
2521 * may be configured via set-channels, along with vectors needed for
2522 	 * RoCE. Else, just program the number we'll use initially.
2523 */
2524 if (be_roce_supported(adapter))
2525 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2526 2 * num_online_cpus());
2527 else
2528 num_vec = adapter->cfg_num_qs;
2529
2530 for (i = 0; i < num_vec; i++)
2531 adapter->msix_entries[i].entry = i;
2532
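	/* the legacy pci_enable_msix() returns 0 on success or, when fewer vectors
	 * are available, a positive count that can be retried with below
	 */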
2533 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2534 if (status == 0) {
2535 goto done;
2536 } else if (status >= MIN_MSIX_VECTORS) {
2537 num_vec = status;
2538 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2539 num_vec);
2540 if (!status)
2541 goto done;
2542 }
2543
2544 dev_warn(dev, "MSIx enable failed\n");
2545
2546 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2547 if (!be_physfn(adapter))
2548 return status;
2549 return 0;
2550 done:
2551 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2552 adapter->num_msix_roce_vec = num_vec / 2;
2553 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2554 adapter->num_msix_roce_vec);
2555 }
2556
2557 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2558
2559 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2560 adapter->num_msix_vec);
2561 return 0;
2562 }
2563
2564 static inline int be_msix_vec_get(struct be_adapter *adapter,
2565 struct be_eq_obj *eqo)
2566 {
2567 return adapter->msix_entries[eqo->msix_idx].vector;
2568 }
2569
2570 static int be_msix_register(struct be_adapter *adapter)
2571 {
2572 struct net_device *netdev = adapter->netdev;
2573 struct be_eq_obj *eqo;
2574 int status, i, vec;
2575
2576 for_all_evt_queues(adapter, eqo, i) {
2577 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2578 vec = be_msix_vec_get(adapter, eqo);
2579 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2580 if (status)
2581 goto err_msix;
2582 }
2583
2584 return 0;
2585 err_msix:
2586 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2587 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2588 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2589 status);
2590 be_msix_disable(adapter);
2591 return status;
2592 }
2593
2594 static int be_irq_register(struct be_adapter *adapter)
2595 {
2596 struct net_device *netdev = adapter->netdev;
2597 int status;
2598
2599 if (msix_enabled(adapter)) {
2600 status = be_msix_register(adapter);
2601 if (status == 0)
2602 goto done;
2603 /* INTx is not supported for VF */
2604 if (!be_physfn(adapter))
2605 return status;
2606 }
2607
2608 /* INTx: only the first EQ is used */
2609 netdev->irq = adapter->pdev->irq;
2610 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2611 &adapter->eq_obj[0]);
2612 if (status) {
2613 dev_err(&adapter->pdev->dev,
2614 "INTx request IRQ failed - err %d\n", status);
2615 return status;
2616 }
2617 done:
2618 adapter->isr_registered = true;
2619 return 0;
2620 }
2621
2622 static void be_irq_unregister(struct be_adapter *adapter)
2623 {
2624 struct net_device *netdev = adapter->netdev;
2625 struct be_eq_obj *eqo;
2626 int i;
2627
2628 if (!adapter->isr_registered)
2629 return;
2630
2631 /* INTx */
2632 if (!msix_enabled(adapter)) {
2633 free_irq(netdev->irq, &adapter->eq_obj[0]);
2634 goto done;
2635 }
2636
2637 /* MSIx */
2638 for_all_evt_queues(adapter, eqo, i)
2639 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2640
2641 done:
2642 adapter->isr_registered = false;
2643 }
2644
2645 static void be_rx_qs_destroy(struct be_adapter *adapter)
2646 {
2647 struct be_queue_info *q;
2648 struct be_rx_obj *rxo;
2649 int i;
2650
2651 for_all_rx_queues(adapter, rxo, i) {
2652 q = &rxo->q;
2653 if (q->created) {
2654 be_cmd_rxq_destroy(adapter, q);
2655 be_rx_cq_clean(rxo);
2656 }
2657 be_queue_free(adapter, q);
2658 }
2659 }
2660
2661 static int be_close(struct net_device *netdev)
2662 {
2663 struct be_adapter *adapter = netdev_priv(netdev);
2664 struct be_eq_obj *eqo;
2665 int i;
2666
2667 be_roce_dev_close(adapter);
2668
2669 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2670 for_all_evt_queues(adapter, eqo, i) {
2671 napi_disable(&eqo->napi);
2672 be_disable_busy_poll(eqo);
2673 }
2674 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2675 }
2676
2677 be_async_mcc_disable(adapter);
2678
2679 /* Wait for all pending tx completions to arrive so that
2680 * all tx skbs are freed.
2681 */
2682 netif_tx_disable(netdev);
2683 be_tx_compl_clean(adapter);
2684
2685 be_rx_qs_destroy(adapter);
2686
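	/* pmac_id[0] holds the primary MAC; delete only the extra uc-mac entries */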
2687 for (i = 1; i < (adapter->uc_macs + 1); i++)
2688 be_cmd_pmac_del(adapter, adapter->if_handle,
2689 adapter->pmac_id[i], 0);
2690 adapter->uc_macs = 0;
2691
2692 for_all_evt_queues(adapter, eqo, i) {
2693 if (msix_enabled(adapter))
2694 synchronize_irq(be_msix_vec_get(adapter, eqo));
2695 else
2696 synchronize_irq(netdev->irq);
2697 be_eq_clean(eqo);
2698 }
2699
2700 be_irq_unregister(adapter);
2701
2702 return 0;
2703 }
2704
2705 static int be_rx_qs_create(struct be_adapter *adapter)
2706 {
2707 struct be_rx_obj *rxo;
2708 int rc, i, j;
2709 u8 rsstable[128];
2710
2711 for_all_rx_queues(adapter, rxo, i) {
2712 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2713 sizeof(struct be_eth_rx_d));
2714 if (rc)
2715 return rc;
2716 }
2717
2718 /* The FW would like the default RXQ to be created first */
2719 rxo = default_rxo(adapter);
2720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2721 adapter->if_handle, false, &rxo->rss_id);
2722 if (rc)
2723 return rc;
2724
2725 for_all_rss_queues(adapter, rxo, i) {
2726 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2727 rx_frag_size, adapter->if_handle,
2728 true, &rxo->rss_id);
2729 if (rc)
2730 return rc;
2731 }
2732
2733 if (be_multi_rxq(adapter)) {
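		/* stripe the RSS ring ids round-robin across the 128-entry indirection table */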
2734 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2735 for_all_rss_queues(adapter, rxo, i) {
2736 if ((j + i) >= 128)
2737 break;
2738 rsstable[j + i] = rxo->rss_id;
2739 }
2740 }
2741 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2742 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2743
2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6;
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
2751
2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2753 128);
2754 if (rc) {
2755 adapter->rss_flags = RSS_ENABLE_NONE;
2756 return rc;
2757 }
2758
2759 /* First time posting */
2760 for_all_rx_queues(adapter, rxo, i)
2761 be_post_rx_frags(rxo, GFP_KERNEL);
2762 return 0;
2763 }
2764
2765 static int be_open(struct net_device *netdev)
2766 {
2767 struct be_adapter *adapter = netdev_priv(netdev);
2768 struct be_eq_obj *eqo;
2769 struct be_rx_obj *rxo;
2770 struct be_tx_obj *txo;
2771 u8 link_status;
2772 int status, i;
2773
2774 status = be_rx_qs_create(adapter);
2775 if (status)
2776 goto err;
2777
2778 status = be_irq_register(adapter);
2779 if (status)
2780 goto err;
2781
2782 for_all_rx_queues(adapter, rxo, i)
2783 be_cq_notify(adapter, rxo->cq.id, true, 0);
2784
2785 for_all_tx_queues(adapter, txo, i)
2786 be_cq_notify(adapter, txo->cq.id, true, 0);
2787
2788 be_async_mcc_enable(adapter);
2789
2790 for_all_evt_queues(adapter, eqo, i) {
2791 napi_enable(&eqo->napi);
2792 be_enable_busy_poll(eqo);
2793 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2794 }
2795 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2796
2797 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2798 if (!status)
2799 be_link_status_update(adapter, link_status);
2800
2801 netif_tx_start_all_queues(netdev);
2802 be_roce_dev_open(adapter);
2803 return 0;
2804 err:
2805 be_close(adapter->netdev);
2806 return -EIO;
2807 }
2808
2809 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2810 {
2811 struct be_dma_mem cmd;
2812 int status = 0;
2813 u8 mac[ETH_ALEN];
2814
2815 memset(mac, 0, ETH_ALEN);
2816
2817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2818 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2819 GFP_KERNEL);
2820 if (cmd.va == NULL)
2821 return -1;
2822
2823 if (enable) {
2824 status = pci_write_config_dword(adapter->pdev,
2825 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2826 if (status) {
2827 dev_err(&adapter->pdev->dev,
2828 "Could not enable Wake-on-lan\n");
2829 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2830 cmd.dma);
2831 return status;
2832 }
2833 status = be_cmd_enable_magic_wol(adapter,
2834 adapter->netdev->dev_addr, &cmd);
2835 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2836 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2837 } else {
2838 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2839 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2840 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2841 }
2842
2843 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2844 return status;
2845 }
2846
2847 /*
2848 * Generate a seed MAC address from the PF MAC Address using jhash.
2849 * MAC addresses for VFs are assigned incrementally starting from the seed.
2850 * These addresses are programmed in the ASIC by the PF and the VF driver
2851 * queries for the MAC address during its probe.
2852 */
2853 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2854 {
2855 u32 vf;
2856 int status = 0;
2857 u8 mac[ETH_ALEN];
2858 struct be_vf_cfg *vf_cfg;
2859
2860 be_vf_eth_addr_generate(adapter, mac);
2861
2862 for_all_vfs(adapter, vf_cfg, vf) {
2863 if (BEx_chip(adapter))
2864 status = be_cmd_pmac_add(adapter, mac,
2865 vf_cfg->if_handle,
2866 &vf_cfg->pmac_id, vf + 1);
2867 else
2868 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2869 vf + 1);
2870
2871 if (status)
2872 dev_err(&adapter->pdev->dev,
2873 "Mac address assignment failed for VF %d\n", vf);
2874 else
2875 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2876
2877 mac[5] += 1;
2878 }
2879 return status;
2880 }
2881
2882 static int be_vfs_mac_query(struct be_adapter *adapter)
2883 {
2884 int status, vf;
2885 u8 mac[ETH_ALEN];
2886 struct be_vf_cfg *vf_cfg;
2887 bool active = false;
2888
2889 for_all_vfs(adapter, vf_cfg, vf) {
2890 be_cmd_get_mac_from_list(adapter, mac, &active,
2891 &vf_cfg->pmac_id, 0);
2892
2893 status = be_cmd_mac_addr_query(adapter, mac, false,
2894 vf_cfg->if_handle, 0);
2895 if (status)
2896 return status;
2897 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2898 }
2899 return 0;
2900 }
2901
2902 static void be_vf_clear(struct be_adapter *adapter)
2903 {
2904 struct be_vf_cfg *vf_cfg;
2905 u32 vf;
2906
2907 if (pci_vfs_assigned(adapter->pdev)) {
2908 dev_warn(&adapter->pdev->dev,
2909 "VFs are assigned to VMs: not disabling VFs\n");
2910 goto done;
2911 }
2912
2913 pci_disable_sriov(adapter->pdev);
2914
2915 for_all_vfs(adapter, vf_cfg, vf) {
2916 if (BEx_chip(adapter))
2917 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2918 vf_cfg->pmac_id, vf + 1);
2919 else
2920 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2921 vf + 1);
2922
2923 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2924 }
2925 done:
2926 kfree(adapter->vf_cfg);
2927 adapter->num_vfs = 0;
2928 }
2929
2930 static void be_clear_queues(struct be_adapter *adapter)
2931 {
2932 be_mcc_queues_destroy(adapter);
2933 be_rx_cqs_destroy(adapter);
2934 be_tx_queues_destroy(adapter);
2935 be_evt_queues_destroy(adapter);
2936 }
2937
2938 static void be_cancel_worker(struct be_adapter *adapter)
2939 {
2940 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2941 cancel_delayed_work_sync(&adapter->work);
2942 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2943 }
2944 }
2945
2946 static void be_mac_clear(struct be_adapter *adapter)
2947 {
2948 int i;
2949
2950 if (adapter->pmac_id) {
2951 for (i = 0; i < (adapter->uc_macs + 1); i++)
2952 be_cmd_pmac_del(adapter, adapter->if_handle,
2953 adapter->pmac_id[i], 0);
2954 adapter->uc_macs = 0;
2955
2956 kfree(adapter->pmac_id);
2957 adapter->pmac_id = NULL;
2958 }
2959 }
2960
2961 static int be_clear(struct be_adapter *adapter)
2962 {
2963 be_cancel_worker(adapter);
2964
2965 if (sriov_enabled(adapter))
2966 be_vf_clear(adapter);
2967
2968 /* delete the primary mac along with the uc-mac list */
2969 be_mac_clear(adapter);
2970
2971 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2972
2973 be_clear_queues(adapter);
2974
2975 be_msix_disable(adapter);
2976 return 0;
2977 }
2978
2979 static int be_vfs_if_create(struct be_adapter *adapter)
2980 {
2981 struct be_resources res = {0};
2982 struct be_vf_cfg *vf_cfg;
2983 u32 cap_flags, en_flags, vf;
2984 int status = 0;
2985
2986 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2987 BE_IF_FLAGS_MULTICAST;
2988
2989 for_all_vfs(adapter, vf_cfg, vf) {
2990 if (!BE3_chip(adapter)) {
2991 status = be_cmd_get_profile_config(adapter, &res,
2992 vf + 1);
2993 if (!status)
2994 cap_flags = res.if_cap_flags;
2995 }
2996
2997 /* If a FW profile exists, then cap_flags are updated */
2998 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2999 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3000 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3001 &vf_cfg->if_handle, vf + 1);
3002 if (status)
3003 goto err;
3004 }
3005 err:
3006 return status;
3007 }
3008
3009 static int be_vf_setup_init(struct be_adapter *adapter)
3010 {
3011 struct be_vf_cfg *vf_cfg;
3012 int vf;
3013
3014 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3015 GFP_KERNEL);
3016 if (!adapter->vf_cfg)
3017 return -ENOMEM;
3018
3019 for_all_vfs(adapter, vf_cfg, vf) {
3020 vf_cfg->if_handle = -1;
3021 vf_cfg->pmac_id = -1;
3022 }
3023 return 0;
3024 }
3025
3026 static int be_vf_setup(struct be_adapter *adapter)
3027 {
3028 struct be_vf_cfg *vf_cfg;
3029 u16 def_vlan, lnk_speed;
3030 int status, old_vfs, vf;
3031 struct device *dev = &adapter->pdev->dev;
3032 u32 privileges;
3033
3034 old_vfs = pci_num_vf(adapter->pdev);
3035 if (old_vfs) {
3036 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3037 if (old_vfs != num_vfs)
3038 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3039 adapter->num_vfs = old_vfs;
3040 } else {
3041 if (num_vfs > be_max_vfs(adapter))
3042 dev_info(dev, "Device supports %d VFs and not %d\n",
3043 be_max_vfs(adapter), num_vfs);
3044 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3045 if (!adapter->num_vfs)
3046 return 0;
3047 }
3048
3049 status = be_vf_setup_init(adapter);
3050 if (status)
3051 goto err;
3052
3053 if (old_vfs) {
3054 for_all_vfs(adapter, vf_cfg, vf) {
3055 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3056 if (status)
3057 goto err;
3058 }
3059 } else {
3060 status = be_vfs_if_create(adapter);
3061 if (status)
3062 goto err;
3063 }
3064
3065 if (old_vfs) {
3066 status = be_vfs_mac_query(adapter);
3067 if (status)
3068 goto err;
3069 } else {
3070 status = be_vf_eth_addr_config(adapter);
3071 if (status)
3072 goto err;
3073 }
3074
3075 for_all_vfs(adapter, vf_cfg, vf) {
3076 		/* Allow VFs to program MAC/VLAN filters */
3077 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3078 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3079 status = be_cmd_set_fn_privileges(adapter,
3080 privileges |
3081 BE_PRIV_FILTMGMT,
3082 vf + 1);
3083 if (!status)
3084 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3085 vf);
3086 }
3087
3088 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3089 * Allow full available bandwidth
3090 */
3091 if (BE3_chip(adapter) && !old_vfs)
3092 be_cmd_set_qos(adapter, 1000, vf+1);
3093
3094 status = be_cmd_link_status_query(adapter, &lnk_speed,
3095 NULL, vf + 1);
3096 if (!status)
3097 vf_cfg->tx_rate = lnk_speed;
3098
3099 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3100 vf + 1, vf_cfg->if_handle, NULL);
3101 if (status)
3102 goto err;
3103 vf_cfg->def_vid = def_vlan;
3104
3105 if (!old_vfs)
3106 be_cmd_enable_vf(adapter, vf + 1);
3107 }
3108
3109 if (!old_vfs) {
3110 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3111 if (status) {
3112 dev_err(dev, "SRIOV enable failed\n");
3113 adapter->num_vfs = 0;
3114 goto err;
3115 }
3116 }
3117 return 0;
3118 err:
3119 dev_err(dev, "VF setup failed\n");
3120 be_vf_clear(adapter);
3121 return status;
3122 }
3123
3124 /* On BE2/BE3 FW does not suggest the supported limits */
3125 static void BEx_get_resources(struct be_adapter *adapter,
3126 struct be_resources *res)
3127 {
3128 struct pci_dev *pdev = adapter->pdev;
3129 bool use_sriov = false;
3130 int max_vfs;
3131
3132 max_vfs = pci_sriov_get_totalvfs(pdev);
3133
3134 if (BE3_chip(adapter) && sriov_want(adapter)) {
3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3136 use_sriov = res->max_vfs;
3137 }
3138
3139 if (be_physfn(adapter))
3140 res->max_uc_mac = BE_UC_PMAC_COUNT;
3141 else
3142 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3143
3144 if (adapter->function_mode & FLEX10_MODE)
3145 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3146 else if (adapter->function_mode & UMC_ENABLED)
3147 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3148 else
3149 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3150 res->max_mcast_mac = BE_MAX_MC;
3151
3152 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3153 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3154 !be_physfn(adapter) || (adapter->port_num > 1))
3155 res->max_tx_qs = 1;
3156 else
3157 res->max_tx_qs = BE3_MAX_TX_QS;
3158
3159 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3160 !use_sriov && be_physfn(adapter))
3161 res->max_rss_qs = (adapter->be3_native) ?
3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3163 res->max_rx_qs = res->max_rss_qs + 1;
3164
3165 if (be_physfn(adapter))
3166 res->max_evt_qs = (max_vfs > 0) ?
3167 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168 else
3169 res->max_evt_qs = 1;
3170
3171 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3172 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3173 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3174 }
3175
3176 static void be_setup_init(struct be_adapter *adapter)
3177 {
3178 adapter->vlan_prio_bmap = 0xff;
3179 adapter->phy.link_speed = -1;
3180 adapter->if_handle = -1;
3181 adapter->be3_native = false;
3182 adapter->promiscuous = false;
3183 if (be_physfn(adapter))
3184 adapter->cmd_privileges = MAX_PRIVILEGES;
3185 else
3186 adapter->cmd_privileges = MIN_PRIVILEGES;
3187 }
3188
3189 static int be_get_resources(struct be_adapter *adapter)
3190 {
3191 struct device *dev = &adapter->pdev->dev;
3192 struct be_resources res = {0};
3193 int status;
3194
3195 if (BEx_chip(adapter)) {
3196 BEx_get_resources(adapter, &res);
3197 adapter->res = res;
3198 }
3199
3200 	/* For Lancer, SH etc. read per-function resource limits from FW.
3201 	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3202 	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3203 */
3204 if (!BEx_chip(adapter)) {
3205 status = be_cmd_get_func_config(adapter, &res);
3206 if (status)
3207 return status;
3208
3209 /* If RoCE may be enabled stash away half the EQs for RoCE */
3210 if (be_roce_supported(adapter))
3211 res.max_evt_qs /= 2;
3212 adapter->res = res;
3213
3214 if (be_physfn(adapter)) {
3215 status = be_cmd_get_profile_config(adapter, &res, 0);
3216 if (status)
3217 return status;
3218 adapter->res.max_vfs = res.max_vfs;
3219 }
3220
3221 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3222 be_max_txqs(adapter), be_max_rxqs(adapter),
3223 be_max_rss(adapter), be_max_eqs(adapter),
3224 be_max_vfs(adapter));
3225 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3226 be_max_uc(adapter), be_max_mc(adapter),
3227 be_max_vlans(adapter));
3228 }
3229
3230 return 0;
3231 }
3232
3233 /* Routine to query per function resource limits */
3234 static int be_get_config(struct be_adapter *adapter)
3235 {
3236 u16 profile_id;
3237 int status;
3238
3239 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3240 &adapter->function_mode,
3241 &adapter->function_caps,
3242 &adapter->asic_rev);
3243 if (status)
3244 return status;
3245
3246 if (be_physfn(adapter)) {
3247 status = be_cmd_get_active_profile(adapter, &profile_id);
3248 if (!status)
3249 dev_info(&adapter->pdev->dev,
3250 "Using profile 0x%x\n", profile_id);
3251 }
3252
3253 status = be_get_resources(adapter);
3254 if (status)
3255 return status;
3256
3257 /* primary mac needs 1 pmac entry */
3258 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3259 GFP_KERNEL);
3260 if (!adapter->pmac_id)
3261 return -ENOMEM;
3262
3263 /* Sanitize cfg_num_qs based on HW and platform limits */
3264 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3265
3266 return 0;
3267 }
3268
3269 static int be_mac_setup(struct be_adapter *adapter)
3270 {
3271 u8 mac[ETH_ALEN];
3272 int status;
3273
3274 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3275 status = be_cmd_get_perm_mac(adapter, mac);
3276 if (status)
3277 return status;
3278
3279 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3280 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3281 } else {
3282 /* Maybe the HW was reset; dev_addr must be re-programmed */
3283 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3284 }
3285
3286 /* For BE3-R VFs, the PF programs the initial MAC address */
3287 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3288 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3289 &adapter->pmac_id[0], 0);
3290 return 0;
3291 }
3292
3293 static void be_schedule_worker(struct be_adapter *adapter)
3294 {
3295 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3296 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3297 }
3298
3299 static int be_setup_queues(struct be_adapter *adapter)
3300 {
3301 struct net_device *netdev = adapter->netdev;
3302 int status;
3303
3304 status = be_evt_queues_create(adapter);
3305 if (status)
3306 goto err;
3307
3308 status = be_tx_qs_create(adapter);
3309 if (status)
3310 goto err;
3311
3312 status = be_rx_cqs_create(adapter);
3313 if (status)
3314 goto err;
3315
3316 status = be_mcc_queues_create(adapter);
3317 if (status)
3318 goto err;
3319
3320 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3321 if (status)
3322 goto err;
3323
3324 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3325 if (status)
3326 goto err;
3327
3328 return 0;
3329 err:
3330 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3331 return status;
3332 }
3333
3334 int be_update_queues(struct be_adapter *adapter)
3335 {
3336 struct net_device *netdev = adapter->netdev;
3337 int status;
3338
3339 if (netif_running(netdev))
3340 be_close(netdev);
3341
3342 be_cancel_worker(adapter);
3343
3344 /* If any vectors have been shared with RoCE we cannot re-program
3345 * the MSIx table.
3346 */
3347 if (!adapter->num_msix_roce_vec)
3348 be_msix_disable(adapter);
3349
3350 be_clear_queues(adapter);
3351
3352 if (!msix_enabled(adapter)) {
3353 status = be_msix_enable(adapter);
3354 if (status)
3355 return status;
3356 }
3357
3358 status = be_setup_queues(adapter);
3359 if (status)
3360 return status;
3361
3362 be_schedule_worker(adapter);
3363
3364 if (netif_running(netdev))
3365 status = be_open(netdev);
3366
3367 return status;
3368 }
3369
3370 static int be_setup(struct be_adapter *adapter)
3371 {
3372 struct device *dev = &adapter->pdev->dev;
3373 u32 tx_fc, rx_fc, en_flags;
3374 int status;
3375
3376 be_setup_init(adapter);
3377
3378 if (!lancer_chip(adapter))
3379 be_cmd_req_native_mode(adapter);
3380
3381 status = be_get_config(adapter);
3382 if (status)
3383 goto err;
3384
3385 status = be_msix_enable(adapter);
3386 if (status)
3387 goto err;
3388
3389 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3390 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3391 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3392 en_flags |= BE_IF_FLAGS_RSS;
3393 en_flags = en_flags & be_if_cap_flags(adapter);
3394 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3395 &adapter->if_handle, 0);
3396 if (status)
3397 goto err;
3398
3399 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3400 rtnl_lock();
3401 status = be_setup_queues(adapter);
3402 rtnl_unlock();
3403 if (status)
3404 goto err;
3405
3406 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3407 	/* In UMC mode FW does not return the right privileges.
3408 * Override with correct privilege equivalent to PF.
3409 */
3410 if (be_is_mc(adapter))
3411 adapter->cmd_privileges = MAX_PRIVILEGES;
3412
3413 status = be_mac_setup(adapter);
3414 if (status)
3415 goto err;
3416
3417 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3418
3419 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3420 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3421 adapter->fw_ver);
3422 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3423 }
3424
3425 if (adapter->vlans_added)
3426 be_vid_config(adapter);
3427
3428 be_set_rx_mode(adapter->netdev);
3429
3430 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3431
3432 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3433 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3434 adapter->rx_fc);
3435
3436 if (sriov_want(adapter)) {
3437 if (be_max_vfs(adapter))
3438 be_vf_setup(adapter);
3439 else
3440 dev_warn(dev, "device doesn't support SRIOV\n");
3441 }
3442
3443 status = be_cmd_get_phy_info(adapter);
3444 if (!status && be_pause_supported(adapter))
3445 adapter->phy.fc_autoneg = 1;
3446
3447 be_schedule_worker(adapter);
3448 return 0;
3449 err:
3450 be_clear(adapter);
3451 return status;
3452 }
3453
3454 #ifdef CONFIG_NET_POLL_CONTROLLER
3455 static void be_netpoll(struct net_device *netdev)
3456 {
3457 struct be_adapter *adapter = netdev_priv(netdev);
3458 struct be_eq_obj *eqo;
3459 int i;
3460
3461 for_all_evt_queues(adapter, eqo, i) {
3462 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3463 napi_schedule(&eqo->napi);
3464 }
3465
3466 return;
3467 }
3468 #endif
3469
3470 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3471 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3472
3473 static bool be_flash_redboot(struct be_adapter *adapter,
3474 const u8 *p, u32 img_start, int image_size,
3475 int hdr_size)
3476 {
3477 u32 crc_offset;
3478 u8 flashed_crc[4];
3479 int status;
3480
3481 crc_offset = hdr_size + img_start + image_size - 4;
3482
3483 p += crc_offset;
3484
3485 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3486 (image_size - 4));
3487 if (status) {
3488 dev_err(&adapter->pdev->dev,
3489 "could not get crc from flash, not flashing redboot\n");
3490 return false;
3491 }
3492
3493 	/* update redboot only if crc does not match */
3494 if (!memcmp(flashed_crc, p, 4))
3495 return false;
3496 else
3497 return true;
3498 }
3499
3500 static bool phy_flashing_required(struct be_adapter *adapter)
3501 {
3502 return (adapter->phy.phy_type == TN_8022 &&
3503 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3504 }
3505
3506 static bool is_comp_in_ufi(struct be_adapter *adapter,
3507 struct flash_section_info *fsec, int type)
3508 {
3509 int i = 0, img_type = 0;
3510 struct flash_section_info_g2 *fsec_g2 = NULL;
3511
3512 if (BE2_chip(adapter))
3513 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3514
3515 for (i = 0; i < MAX_FLASH_COMP; i++) {
3516 if (fsec_g2)
3517 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3518 else
3519 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3520
3521 if (img_type == type)
3522 return true;
3523 }
3524 return false;
3525
3526 }
3527
3528 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3529 int header_size,
3530 const struct firmware *fw)
3531 {
3532 struct flash_section_info *fsec = NULL;
3533 const u8 *p = fw->data;
3534
3535 p += header_size;
3536 while (p < (fw->data + fw->size)) {
3537 fsec = (struct flash_section_info *)p;
3538 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3539 return fsec;
3540 p += 32;
3541 }
3542 return NULL;
3543 }
3544
3545 static int be_flash(struct be_adapter *adapter, const u8 *img,
3546 struct be_dma_mem *flash_cmd, int optype, int img_size)
3547 {
3548 u32 total_bytes = 0, flash_op, num_bytes = 0;
3549 int status = 0;
3550 struct be_cmd_write_flashrom *req = flash_cmd->va;
3551
3552 total_bytes = img_size;
3553 while (total_bytes) {
3554 num_bytes = min_t(u32, 32*1024, total_bytes);
3555
3556 total_bytes -= num_bytes;
3557
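		/* only the final chunk issues the actual flash op; earlier chunks are just saved by FW */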
3558 if (!total_bytes) {
3559 if (optype == OPTYPE_PHY_FW)
3560 flash_op = FLASHROM_OPER_PHY_FLASH;
3561 else
3562 flash_op = FLASHROM_OPER_FLASH;
3563 } else {
3564 if (optype == OPTYPE_PHY_FW)
3565 flash_op = FLASHROM_OPER_PHY_SAVE;
3566 else
3567 flash_op = FLASHROM_OPER_SAVE;
3568 }
3569
3570 memcpy(req->data_buf, img, num_bytes);
3571 img += num_bytes;
3572 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3573 flash_op, num_bytes);
3574 if (status) {
3575 if (status == ILLEGAL_IOCTL_REQ &&
3576 optype == OPTYPE_PHY_FW)
3577 break;
3578 dev_err(&adapter->pdev->dev,
3579 "cmd to write to flash rom failed.\n");
3580 return status;
3581 }
3582 }
3583 return 0;
3584 }
3585
3586 /* For BE2, BE3 and BE3-R */
3587 static int be_flash_BEx(struct be_adapter *adapter,
3588 const struct firmware *fw,
3589 struct be_dma_mem *flash_cmd,
3590 int num_of_images)
3591
3592 {
3593 int status = 0, i, filehdr_size = 0;
3594 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3595 const u8 *p = fw->data;
3596 const struct flash_comp *pflashcomp;
3597 int num_comp, redboot;
3598 struct flash_section_info *fsec = NULL;
3599
3600 struct flash_comp gen3_flash_types[] = {
3601 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3602 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3603 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3604 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3605 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3606 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3607 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3608 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3609 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3610 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3611 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3612 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3613 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3614 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3615 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3616 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3617 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3618 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3619 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3620 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3621 };
3622
3623 struct flash_comp gen2_flash_types[] = {
3624 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3625 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3626 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3627 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3628 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3629 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3630 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3631 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3632 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3633 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3634 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3635 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3636 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3637 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3638 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3639 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3640 };
3641
3642 if (BE3_chip(adapter)) {
3643 pflashcomp = gen3_flash_types;
3644 filehdr_size = sizeof(struct flash_file_hdr_g3);
3645 num_comp = ARRAY_SIZE(gen3_flash_types);
3646 } else {
3647 pflashcomp = gen2_flash_types;
3648 filehdr_size = sizeof(struct flash_file_hdr_g2);
3649 num_comp = ARRAY_SIZE(gen2_flash_types);
3650 }
3651
3652 	/* Get flash section info */
3653 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3654 if (!fsec) {
3655 dev_err(&adapter->pdev->dev,
3656 "Invalid Cookie. UFI corrupted ?\n");
3657 return -1;
3658 }
3659 for (i = 0; i < num_comp; i++) {
3660 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3661 continue;
3662
3663 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3664 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3665 continue;
3666
3667 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3668 !phy_flashing_required(adapter))
3669 continue;
3670
3671 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3672 redboot = be_flash_redboot(adapter, fw->data,
3673 pflashcomp[i].offset, pflashcomp[i].size,
3674 filehdr_size + img_hdrs_size);
3675 if (!redboot)
3676 continue;
3677 }
3678
3679 p = fw->data;
3680 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3681 if (p + pflashcomp[i].size > fw->data + fw->size)
3682 return -1;
3683
3684 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3685 pflashcomp[i].size);
3686 if (status) {
3687 dev_err(&adapter->pdev->dev,
3688 "Flashing section type %d failed.\n",
3689 pflashcomp[i].img_type);
3690 return status;
3691 }
3692 }
3693 return 0;
3694 }
3695
3696 static int be_flash_skyhawk(struct be_adapter *adapter,
3697 const struct firmware *fw,
3698 struct be_dma_mem *flash_cmd, int num_of_images)
3699 {
3700 int status = 0, i, filehdr_size = 0;
3701 int img_offset, img_size, img_optype, redboot;
3702 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3703 const u8 *p = fw->data;
3704 struct flash_section_info *fsec = NULL;
3705
3706 filehdr_size = sizeof(struct flash_file_hdr_g3);
3707 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3708 if (!fsec) {
3709 dev_err(&adapter->pdev->dev,
3710 "Invalid Cookie. UFI corrupted ?\n");
3711 return -1;
3712 }
3713
3714 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3715 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3716 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3717
3718 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3719 case IMAGE_FIRMWARE_iSCSI:
3720 img_optype = OPTYPE_ISCSI_ACTIVE;
3721 break;
3722 case IMAGE_BOOT_CODE:
3723 img_optype = OPTYPE_REDBOOT;
3724 break;
3725 case IMAGE_OPTION_ROM_ISCSI:
3726 img_optype = OPTYPE_BIOS;
3727 break;
3728 case IMAGE_OPTION_ROM_PXE:
3729 img_optype = OPTYPE_PXE_BIOS;
3730 break;
3731 case IMAGE_OPTION_ROM_FCoE:
3732 img_optype = OPTYPE_FCOE_BIOS;
3733 break;
3734 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3735 img_optype = OPTYPE_ISCSI_BACKUP;
3736 break;
3737 case IMAGE_NCSI:
3738 img_optype = OPTYPE_NCSI_FW;
3739 break;
3740 default:
3741 continue;
3742 }
3743
3744 if (img_optype == OPTYPE_REDBOOT) {
3745 redboot = be_flash_redboot(adapter, fw->data,
3746 img_offset, img_size,
3747 filehdr_size + img_hdrs_size);
3748 if (!redboot)
3749 continue;
3750 }
3751
3752 p = fw->data;
3753 p += filehdr_size + img_offset + img_hdrs_size;
3754 if (p + img_size > fw->data + fw->size)
3755 return -1;
3756
3757 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3758 if (status) {
3759 dev_err(&adapter->pdev->dev,
3760 "Flashing section type %d failed.\n",
3761 fsec->fsec_entry[i].type);
3762 return status;
3763 }
3764 }
3765 return 0;
3766 }
3767
3768 static int lancer_fw_download(struct be_adapter *adapter,
3769 const struct firmware *fw)
3770 {
3771 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3772 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3773 struct be_dma_mem flash_cmd;
3774 const u8 *data_ptr = NULL;
3775 u8 *dest_image_ptr = NULL;
3776 size_t image_size = 0;
3777 u32 chunk_size = 0;
3778 u32 data_written = 0;
3779 u32 offset = 0;
3780 int status = 0;
3781 u8 add_status = 0;
3782 u8 change_status;
3783
3784 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3785 dev_err(&adapter->pdev->dev,
3786 "FW Image not properly aligned. "
3787 "Length must be 4 byte aligned.\n");
3788 status = -EINVAL;
3789 goto lancer_fw_exit;
3790 }
3791
3792 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3793 + LANCER_FW_DOWNLOAD_CHUNK;
3794 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3795 &flash_cmd.dma, GFP_KERNEL);
3796 if (!flash_cmd.va) {
3797 status = -ENOMEM;
3798 goto lancer_fw_exit;
3799 }
3800
3801 dest_image_ptr = flash_cmd.va +
3802 sizeof(struct lancer_cmd_req_write_object);
3803 image_size = fw->size;
3804 data_ptr = fw->data;
3805
3806 while (image_size) {
3807 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3808
3809 /* Copy the image chunk content. */
3810 memcpy(dest_image_ptr, data_ptr, chunk_size);
3811
3812 status = lancer_cmd_write_object(adapter, &flash_cmd,
3813 chunk_size, offset,
3814 LANCER_FW_DOWNLOAD_LOCATION,
3815 &data_written, &change_status,
3816 &add_status);
3817 if (status)
3818 break;
3819
3820 offset += data_written;
3821 data_ptr += data_written;
3822 image_size -= data_written;
3823 }
3824
3825 if (!status) {
3826 /* Commit the FW written */
3827 status = lancer_cmd_write_object(adapter, &flash_cmd,
3828 0, offset,
3829 LANCER_FW_DOWNLOAD_LOCATION,
3830 &data_written, &change_status,
3831 &add_status);
3832 }
3833
3834 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3835 flash_cmd.dma);
3836 if (status) {
3837 dev_err(&adapter->pdev->dev,
3838 "Firmware load error. "
3839 "Status code: 0x%x Additional Status: 0x%x\n",
3840 status, add_status);
3841 goto lancer_fw_exit;
3842 }
3843
3844 if (change_status == LANCER_FW_RESET_NEEDED) {
3845 dev_info(&adapter->pdev->dev,
3846 "Resetting adapter to activate new FW\n");
3847 status = lancer_physdev_ctrl(adapter,
3848 PHYSDEV_CONTROL_FW_RESET_MASK);
3849 if (status) {
3850 dev_err(&adapter->pdev->dev,
3851 "Adapter busy for FW reset.\n"
3852 "New FW will not be active.\n");
3853 goto lancer_fw_exit;
3854 }
3855 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3856 dev_err(&adapter->pdev->dev,
3857 "System reboot required for new FW"
3858 " to be active\n");
3859 }
3860
3861 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3862 lancer_fw_exit:
3863 return status;
3864 }
3865
3866 #define UFI_TYPE2 2
3867 #define UFI_TYPE3 3
3868 #define UFI_TYPE3R 10
3869 #define UFI_TYPE4 4
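/* Match the UFI image against the adapter generation (BE2/BE3/BE3-R/
 * Skyhawk) using the build string and ASIC revision in the file header.
 */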
3870 static int be_get_ufi_type(struct be_adapter *adapter,
3871 struct flash_file_hdr_g3 *fhdr)
3872 {
3873 if (fhdr == NULL)
3874 goto be_get_ufi_exit;
3875
3876 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3877 return UFI_TYPE4;
3878 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3879 if (fhdr->asic_type_rev == 0x10)
3880 return UFI_TYPE3R;
3881 else
3882 return UFI_TYPE3;
3883 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3884 return UFI_TYPE2;
3885
3886 be_get_ufi_exit:
3887 dev_err(&adapter->pdev->dev,
3888 "UFI and Interface are not compatible for flashing\n");
3889 return -1;
3890 }
3891
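/* Flash a BE2/BE3/Skyhawk UFI image: determine the UFI type from the file
 * header and dispatch to the BEx or Skyhawk flashing routine.
 */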
3892 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3893 {
3894 struct flash_file_hdr_g3 *fhdr3;
3895 struct image_hdr *img_hdr_ptr = NULL;
3896 struct be_dma_mem flash_cmd;
3897 const u8 *p;
3898 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3899
3900 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3901 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3902 &flash_cmd.dma, GFP_KERNEL);
3903 if (!flash_cmd.va) {
3904 status = -ENOMEM;
3905 goto be_fw_exit;
3906 }
3907
3908 p = fw->data;
3909 fhdr3 = (struct flash_file_hdr_g3 *)p;
3910
3911 ufi_type = be_get_ufi_type(adapter, fhdr3);
3912
3913 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3914 for (i = 0; i < num_imgs; i++) {
3915 img_hdr_ptr = (struct image_hdr *)(fw->data +
3916 (sizeof(struct flash_file_hdr_g3) +
3917 i * sizeof(struct image_hdr)));
3918 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3919 switch (ufi_type) {
3920 case UFI_TYPE4:
3921 status = be_flash_skyhawk(adapter, fw,
3922 &flash_cmd, num_imgs);
3923 break;
3924 case UFI_TYPE3R:
3925 status = be_flash_BEx(adapter, fw, &flash_cmd,
3926 num_imgs);
3927 break;
3928 case UFI_TYPE3:
3929 /* Do not flash this ufi on BE3-R cards */
3930 if (adapter->asic_rev < 0x10)
3931 status = be_flash_BEx(adapter, fw,
3932 &flash_cmd,
3933 num_imgs);
3934 else {
3935 status = -1;
3936 dev_err(&adapter->pdev->dev,
3937 "Can't load BE3 UFI on BE3R\n");
3938 }
3939 }
3940 }
3941 }
3942
3943 if (ufi_type == UFI_TYPE2)
3944 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3945 else if (ufi_type == -1)
3946 status = -1;
3947
3948 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3949 flash_cmd.dma);
3950 if (status) {
3951 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3952 goto be_fw_exit;
3953 }
3954
3955 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3956
3957 be_fw_exit:
3958 return status;
3959 }
3960
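/* Load a firmware image supplied from user space: request the file,
 * dispatch to the Lancer or BE/Skyhawk download path and, on success,
 * refresh the cached firmware version.
 */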
3961 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3962 {
3963 const struct firmware *fw;
3964 int status;
3965
3966 if (!netif_running(adapter->netdev)) {
3967 dev_err(&adapter->pdev->dev,
3968 "Firmware load not allowed (interface is down)\n");
3969 return -1;
3970 }
3971
3972 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3973 if (status)
3974 goto fw_exit;
3975
3976 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3977
3978 if (lancer_chip(adapter))
3979 status = lancer_fw_download(adapter, fw);
3980 else
3981 status = be_fw_download(adapter, fw);
3982
3983 if (!status)
3984 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3985 adapter->fw_on_flash);
3986
3987 fw_exit:
3988 release_firmware(fw);
3989 return status;
3990 }
3991
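/* Set the HW switch port-forwarding mode (VEB or VEPA) requested via the
 * IFLA_BRIDGE_MODE netlink attribute; supported only when SR-IOV is enabled.
 */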
3992 static int be_ndo_bridge_setlink(struct net_device *dev,
3993 struct nlmsghdr *nlh)
3994 {
3995 struct be_adapter *adapter = netdev_priv(dev);
3996 struct nlattr *attr, *br_spec;
3997 int rem;
3998 int status = 0;
3999 u16 mode = 0;
4000
4001 if (!sriov_enabled(adapter))
4002 return -EOPNOTSUPP;
4003
4004 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)	/* IFLA_AF_SPEC attribute may be absent */
	return -EINVAL;
4005 
4006 nla_for_each_nested(attr, br_spec, rem) {
4007 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4008 continue;
4009
4010 mode = nla_get_u16(attr);
4011 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4012 return -EINVAL;
4013
4014 status = be_cmd_set_hsw_config(adapter, 0, 0,
4015 adapter->if_handle,
4016 mode == BRIDGE_MODE_VEPA ?
4017 PORT_FWD_TYPE_VEPA :
4018 PORT_FWD_TYPE_VEB);
4019 if (status)
4020 goto err;
4021
4022 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4023 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4024
4025 return status;
4026 }
4027 err:
4028 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4029 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4030
4031 return status;
4032 }
4033
4034 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4035 struct net_device *dev,
4036 u32 filter_mask)
4037 {
4038 struct be_adapter *adapter = netdev_priv(dev);
4039 int status = 0;
4040 u8 hsw_mode;
4041
4042 if (!sriov_enabled(adapter))
4043 return 0;
4044
4045 /* BE and Lancer chips support VEB mode only */
4046 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4047 hsw_mode = PORT_FWD_TYPE_VEB;
4048 } else {
4049 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4050 adapter->if_handle, &hsw_mode);
4051 if (status)
4052 return 0;
4053 }
4054
4055 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4056 hsw_mode == PORT_FWD_TYPE_VEPA ?
4057 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4058 }
4059
4060 static const struct net_device_ops be_netdev_ops = {
4061 .ndo_open = be_open,
4062 .ndo_stop = be_close,
4063 .ndo_start_xmit = be_xmit,
4064 .ndo_set_rx_mode = be_set_rx_mode,
4065 .ndo_set_mac_address = be_mac_addr_set,
4066 .ndo_change_mtu = be_change_mtu,
4067 .ndo_get_stats64 = be_get_stats64,
4068 .ndo_validate_addr = eth_validate_addr,
4069 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4070 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4071 .ndo_set_vf_mac = be_set_vf_mac,
4072 .ndo_set_vf_vlan = be_set_vf_vlan,
4073 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4074 .ndo_get_vf_config = be_get_vf_config,
4075 #ifdef CONFIG_NET_POLL_CONTROLLER
4076 .ndo_poll_controller = be_netpoll,
4077 #endif
4078 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4079 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4080 #ifdef CONFIG_NET_RX_BUSY_POLL
4081 .ndo_busy_poll = be_busy_poll
4082 #endif
4083 };
4084
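/* Advertise netdev features (SG, TSO, checksum and VLAN offloads, RX
 * hashing) and hook up the netdev and ethtool ops.
 */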
4085 static void be_netdev_init(struct net_device *netdev)
4086 {
4087 struct be_adapter *adapter = netdev_priv(netdev);
4088
4089 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4090 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4091 NETIF_F_HW_VLAN_CTAG_TX;
4092 if (be_multi_rxq(adapter))
4093 netdev->hw_features |= NETIF_F_RXHASH;
4094
4095 netdev->features |= netdev->hw_features |
4096 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4097
4098 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4099 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4100
4101 netdev->priv_flags |= IFF_UNICAST_FLT;
4102
4103 netdev->flags |= IFF_MULTICAST;
4104
4105 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4106
4107 netdev->netdev_ops = &be_netdev_ops;
4108
4109 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4110 }
4111
4112 static void be_unmap_pci_bars(struct be_adapter *adapter)
4113 {
4114 if (adapter->csr)
4115 pci_iounmap(adapter->pdev, adapter->csr);
4116 if (adapter->db)
4117 pci_iounmap(adapter->pdev, adapter->db);
4118 }
4119
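/* Doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 on BE/Skyhawk PFs */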
4120 static int db_bar(struct be_adapter *adapter)
4121 {
4122 if (lancer_chip(adapter) || !be_physfn(adapter))
4123 return 0;
4124 else
4125 return 4;
4126 }
4127
4128 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4129 {
4130 if (skyhawk_chip(adapter)) {
4131 adapter->roce_db.size = 4096;
4132 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4133 db_bar(adapter));
4134 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4135 db_bar(adapter));
4136 }
4137 return 0;
4138 }
4139
4140 static int be_map_pci_bars(struct be_adapter *adapter)
4141 {
4142 u8 __iomem *addr;
4143
4144 if (BEx_chip(adapter) && be_physfn(adapter)) {
4145 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4146 if (adapter->csr == NULL)
4147 return -ENOMEM;
4148 }
4149
4150 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4151 if (addr == NULL)
4152 goto pci_map_err;
4153 adapter->db = addr;
4154
4155 be_roce_map_pci_bars(adapter);
4156 return 0;
4157
4158 pci_map_err:
4159 be_unmap_pci_bars(adapter);
4160 return -ENOMEM;
4161 }
4162
4163 static void be_ctrl_cleanup(struct be_adapter *adapter)
4164 {
4165 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4166
4167 be_unmap_pci_bars(adapter);
4168
4169 if (mem->va)
4170 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4171 mem->dma);
4172
4173 mem = &adapter->rx_filter;
4174 if (mem->va)
4175 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4176 mem->dma);
4177 }
4178
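/* Map the PCI BARs and allocate the DMA memory (mailbox and RX-filter
 * command buffers) and locks needed to issue commands to the controller.
 */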
4179 static int be_ctrl_init(struct be_adapter *adapter)
4180 {
4181 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4182 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4183 struct be_dma_mem *rx_filter = &adapter->rx_filter;
4184 u32 sli_intf;
4185 int status;
4186
4187 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4188 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4189 SLI_INTF_FAMILY_SHIFT;
4190 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4191
4192 status = be_map_pci_bars(adapter);
4193 if (status)
4194 goto done;
4195
4196 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4197 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4198 mbox_mem_alloc->size,
4199 &mbox_mem_alloc->dma,
4200 GFP_KERNEL);
4201 if (!mbox_mem_alloc->va) {
4202 status = -ENOMEM;
4203 goto unmap_pci_bars;
4204 }
4205 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4206 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4207 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4208 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4209
4210 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4211 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4212 rx_filter->size, &rx_filter->dma,
4213 GFP_KERNEL);
4214 if (rx_filter->va == NULL) {
4215 status = -ENOMEM;
4216 goto free_mbox;
4217 }
4218
4219 mutex_init(&adapter->mbox_lock);
4220 spin_lock_init(&adapter->mcc_lock);
4221 spin_lock_init(&adapter->mcc_cq_lock);
4222
4223 init_completion(&adapter->et_cmd_compl);
4224 pci_save_state(adapter->pdev);
4225 return 0;
4226
4227 free_mbox:
4228 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4229 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4230
4231 unmap_pci_bars:
4232 be_unmap_pci_bars(adapter);
4233
4234 done:
4235 return status;
4236 }
4237
4238 static void be_stats_cleanup(struct be_adapter *adapter)
4239 {
4240 struct be_dma_mem *cmd = &adapter->stats_cmd;
4241
4242 if (cmd->va)
4243 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4244 cmd->va, cmd->dma);
4245 }
4246
4247 static int be_stats_init(struct be_adapter *adapter)
4248 {
4249 struct be_dma_mem *cmd = &adapter->stats_cmd;
4250
4251 if (lancer_chip(adapter))
4252 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4253 else if (BE2_chip(adapter))
4254 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4255 else if (BE3_chip(adapter))
4256 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4257 else
4258 /* ALL non-BE ASICs */
4259 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4260
4261 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4262 GFP_KERNEL);
4263 if (cmd->va == NULL)
4264 return -1;
4265 return 0;
4266 }
4267
4268 static void be_remove(struct pci_dev *pdev)
4269 {
4270 struct be_adapter *adapter = pci_get_drvdata(pdev);
4271
4272 if (!adapter)
4273 return;
4274
4275 be_roce_dev_remove(adapter);
4276 be_intr_set(adapter, false);
4277
4278 cancel_delayed_work_sync(&adapter->func_recovery_work);
4279
4280 unregister_netdev(adapter->netdev);
4281
4282 be_clear(adapter);
4283
4284 /* tell fw we're done with firing cmds */
4285 be_cmd_fw_clean(adapter);
4286
4287 be_stats_cleanup(adapter);
4288
4289 be_ctrl_cleanup(adapter);
4290
4291 pci_disable_pcie_error_reporting(pdev);
4292
4293 pci_release_regions(pdev);
4294 pci_disable_device(pdev);
4295
4296 free_netdev(adapter->netdev);
4297 }
4298
4299 bool be_is_wol_supported(struct be_adapter *adapter)
4300 {
4301 return (adapter->wol_cap & BE_WOL_CAP) &&
4302 !be_is_wol_excluded(adapter);
4303 }
4304
4305 static int be_get_initial_config(struct be_adapter *adapter)
4306 {
4307 int status, level;
4308
4309 status = be_cmd_get_cntl_attributes(adapter);
4310 if (status)
4311 return status;
4312
4313 status = be_cmd_get_acpi_wol_cap(adapter);
4314 if (status) {
4315 /* in case of a failure to get WOL capabilities
4316 * check the exclusion list to determine WOL capability */
4317 if (!be_is_wol_excluded(adapter))
4318 adapter->wol_cap |= BE_WOL_CAP;
4319 }
4320
4321 if (be_is_wol_supported(adapter))
4322 adapter->wol = true;
4323
4324 /* Must be a power of 2 or else MODULO will BUG_ON */
4325 adapter->be_get_temp_freq = 64;
4326
4327 if (BEx_chip(adapter)) {
4328 level = be_cmd_get_fw_log_level(adapter);
4329 adapter->msg_enable =
4330 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4331 }
4332
4333 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4334 return 0;
4335 }
4336
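/* Recover a Lancer function after an error: wait for the chip to report
 * ready, then tear down and rebuild the function and reopen the interface.
 */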
4337 static int lancer_recover_func(struct be_adapter *adapter)
4338 {
4339 struct device *dev = &adapter->pdev->dev;
4340 int status;
4341
4342 status = lancer_test_and_set_rdy_state(adapter);
4343 if (status)
4344 goto err;
4345
4346 if (netif_running(adapter->netdev))
4347 be_close(adapter->netdev);
4348
4349 be_clear(adapter);
4350
4351 be_clear_all_error(adapter);
4352
4353 status = be_setup(adapter);
4354 if (status)
4355 goto err;
4356
4357 if (netif_running(adapter->netdev)) {
4358 status = be_open(adapter->netdev);
4359 if (status)
4360 goto err;
4361 }
4362
4363 dev_info(dev, "Adapter recovery successful\n");
4364 return 0;
4365 err:
4366 if (status == -EAGAIN)
4367 dev_err(dev, "Waiting for resource provisioning\n");
4368 else
4369 dev_err(dev, "Adapter recovery failed\n");
4370
4371 return status;
4372 }
4373
4374 static void be_func_recovery_task(struct work_struct *work)
4375 {
4376 struct be_adapter *adapter =
4377 container_of(work, struct be_adapter, func_recovery_work.work);
4378 int status = 0;
4379
4380 be_detect_error(adapter);
4381
4382 if (adapter->hw_error && lancer_chip(adapter)) {
4383
4384 rtnl_lock();
4385 netif_device_detach(adapter->netdev);
4386 rtnl_unlock();
4387
4388 status = lancer_recover_func(adapter);
4389 if (!status)
4390 netif_device_attach(adapter->netdev);
4391 }
4392
4393 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4394 * no need to attempt further recovery.
4395 */
4396 if (!status || status == -EAGAIN)
4397 schedule_delayed_work(&adapter->func_recovery_work,
4398 msecs_to_jiffies(1000));
4399 }
4400
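/* Periodic (1 sec) housekeeping: reap MCC completions while the interface
 * is down, refresh HW stats and die temperature, replenish starved RX
 * queues and update EQ interrupt delays.
 */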
4401 static void be_worker(struct work_struct *work)
4402 {
4403 struct be_adapter *adapter =
4404 container_of(work, struct be_adapter, work.work);
4405 struct be_rx_obj *rxo;
4406 int i;
4407
4408 /* when interrupts are not yet enabled, just reap any pending
4409 * mcc completions */
4410 if (!netif_running(adapter->netdev)) {
4411 local_bh_disable();
4412 be_process_mcc(adapter);
4413 local_bh_enable();
4414 goto reschedule;
4415 }
4416
4417 if (!adapter->stats_cmd_sent) {
4418 if (lancer_chip(adapter))
4419 lancer_cmd_get_pport_stats(adapter,
4420 &adapter->stats_cmd);
4421 else
4422 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4423 }
4424
4425 if (be_physfn(adapter) &&
4426 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4427 be_cmd_get_die_temperature(adapter);
4428
4429 for_all_rx_queues(adapter, rxo, i) {
4430 /* Replenish RX-queues starved due to memory
4431 * allocation failures.
4432 */
4433 if (rxo->rx_post_starved)
4434 be_post_rx_frags(rxo, GFP_KERNEL);
4435 }
4436
4437 be_eqd_update(adapter);
4438
4439 reschedule:
4440 adapter->work_counter++;
4441 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4442 }
4443
4444 /* If any VFs are already enabled don't FLR the PF */
4445 static bool be_reset_required(struct be_adapter *adapter)
4446 {
4447 return !pci_num_vf(adapter->pdev);
4448 }
4449
4450 static char *mc_name(struct be_adapter *adapter)
4451 {
4452 if (adapter->function_mode & FLEX10_MODE)
4453 return "FLEX10";
4454 else if (adapter->function_mode & VNIC_MODE)
4455 return "vNIC";
4456 else if (adapter->function_mode & UMC_ENABLED)
4457 return "UMC";
4458 else
4459 return "";
4460 }
4461
4462 static inline char *func_name(struct be_adapter *adapter)
4463 {
4464 return be_physfn(adapter) ? "PF" : "VF";
4465 }
4466
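/* PCI probe: enable the device, map BARs, bring the function to a known
 * state (FLR if no VFs are active), read the initial config, set up
 * queues/interrupts and register the netdev.
 */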
4467 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4468 {
4469 int status = 0;
4470 struct be_adapter *adapter;
4471 struct net_device *netdev;
4472 char port_name;
4473
4474 status = pci_enable_device(pdev);
4475 if (status)
4476 goto do_none;
4477
4478 status = pci_request_regions(pdev, DRV_NAME);
4479 if (status)
4480 goto disable_dev;
4481 pci_set_master(pdev);
4482
4483 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4484 if (netdev == NULL) {
4485 status = -ENOMEM;
4486 goto rel_reg;
4487 }
4488 adapter = netdev_priv(netdev);
4489 adapter->pdev = pdev;
4490 pci_set_drvdata(pdev, adapter);
4491 adapter->netdev = netdev;
4492 SET_NETDEV_DEV(netdev, &pdev->dev);
4493
4494 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4495 if (!status) {
4496 netdev->features |= NETIF_F_HIGHDMA;
4497 } else {
4498 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4499 if (status) {
4500 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4501 goto free_netdev;
4502 }
4503 }
4504
4505 if (be_physfn(adapter)) {
4506 status = pci_enable_pcie_error_reporting(pdev);
4507 if (!status)
4508 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4509 }
4510
4511 status = be_ctrl_init(adapter);
4512 if (status)
4513 goto free_netdev;
4514
4515 /* sync up with fw's ready state */
4516 if (be_physfn(adapter)) {
4517 status = be_fw_wait_ready(adapter);
4518 if (status)
4519 goto ctrl_clean;
4520 }
4521
4522 if (be_reset_required(adapter)) {
4523 status = be_cmd_reset_function(adapter);
4524 if (status)
4525 goto ctrl_clean;
4526
4527 /* Wait for interrupts to quiesce after an FLR */
4528 msleep(100);
4529 }
4530
4531 /* Allow interrupts for other ULPs running on NIC function */
4532 be_intr_set(adapter, true);
4533
4534 /* tell fw we're ready to fire cmds */
4535 status = be_cmd_fw_init(adapter);
4536 if (status)
4537 goto ctrl_clean;
4538
4539 status = be_stats_init(adapter);
4540 if (status)
4541 goto ctrl_clean;
4542
4543 status = be_get_initial_config(adapter);
4544 if (status)
4545 goto stats_clean;
4546
4547 INIT_DELAYED_WORK(&adapter->work, be_worker);
4548 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4549 adapter->rx_fc = adapter->tx_fc = true;
4550
4551 status = be_setup(adapter);
4552 if (status)
4553 goto stats_clean;
4554
4555 be_netdev_init(netdev);
4556 status = register_netdev(netdev);
4557 if (status != 0)
4558 goto unsetup;
4559
4560 be_roce_dev_add(adapter);
4561
4562 schedule_delayed_work(&adapter->func_recovery_work,
4563 msecs_to_jiffies(1000));
4564
4565 be_cmd_query_port_name(adapter, &port_name);
4566
4567 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4568 func_name(adapter), mc_name(adapter), port_name);
4569
4570 return 0;
4571
4572 unsetup:
4573 be_clear(adapter);
4574 stats_clean:
4575 be_stats_cleanup(adapter);
4576 ctrl_clean:
4577 be_ctrl_cleanup(adapter);
4578 free_netdev:
4579 free_netdev(netdev);
4580 rel_reg:
4581 pci_release_regions(pdev);
4582 disable_dev:
4583 pci_disable_device(pdev);
4584 do_none:
4585 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4586 return status;
4587 }
4588
4589 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4590 {
4591 struct be_adapter *adapter = pci_get_drvdata(pdev);
4592 struct net_device *netdev = adapter->netdev;
4593
4594 if (adapter->wol)
4595 be_setup_wol(adapter, true);
4596
4597 be_intr_set(adapter, false);
4598 cancel_delayed_work_sync(&adapter->func_recovery_work);
4599
4600 netif_device_detach(netdev);
4601 if (netif_running(netdev)) {
4602 rtnl_lock();
4603 be_close(netdev);
4604 rtnl_unlock();
4605 }
4606 be_clear(adapter);
4607
4608 pci_save_state(pdev);
4609 pci_disable_device(pdev);
4610 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4611 return 0;
4612 }
4613
4614 static int be_resume(struct pci_dev *pdev)
4615 {
4616 int status = 0;
4617 struct be_adapter *adapter = pci_get_drvdata(pdev);
4618 struct net_device *netdev = adapter->netdev;
4619
4620 netif_device_detach(netdev);
4621
4622 status = pci_enable_device(pdev);
4623 if (status)
4624 return status;
4625
4626 pci_set_power_state(pdev, PCI_D0);
4627 pci_restore_state(pdev);
4628
4629 status = be_fw_wait_ready(adapter);
4630 if (status)
4631 return status;
4632
4633 be_intr_set(adapter, true);
4634 /* tell fw we're ready to fire cmds */
4635 status = be_cmd_fw_init(adapter);
4636 if (status)
4637 return status;
4638
4639 be_setup(adapter);
4640 if (netif_running(netdev)) {
4641 rtnl_lock();
4642 be_open(netdev);
4643 rtnl_unlock();
4644 }
4645
4646 schedule_delayed_work(&adapter->func_recovery_work,
4647 msecs_to_jiffies(1000));
4648 netif_device_attach(netdev);
4649
4650 if (adapter->wol)
4651 be_setup_wol(adapter, false);
4652
4653 return 0;
4654 }
4655
4656 /*
4657 * An FLR will stop BE from DMAing any data.
4658 */
4659 static void be_shutdown(struct pci_dev *pdev)
4660 {
4661 struct be_adapter *adapter = pci_get_drvdata(pdev);
4662
4663 if (!adapter)
4664 return;
4665
4666 cancel_delayed_work_sync(&adapter->work);
4667 cancel_delayed_work_sync(&adapter->func_recovery_work);
4668
4669 netif_device_detach(adapter->netdev);
4670
4671 be_cmd_reset_function(adapter);
4672
4673 pci_disable_device(pdev);
4674 }
4675
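/* EEH (PCI error) handlers: detach and clean up when an error is detected,
 * re-initialize the function on slot reset and restore traffic on resume.
 */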
4676 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4677 pci_channel_state_t state)
4678 {
4679 struct be_adapter *adapter = pci_get_drvdata(pdev);
4680 struct net_device *netdev = adapter->netdev;
4681
4682 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4683
4684 if (!adapter->eeh_error) {
4685 adapter->eeh_error = true;
4686
4687 cancel_delayed_work_sync(&adapter->func_recovery_work);
4688
4689 rtnl_lock();
4690 netif_device_detach(netdev);
4691 if (netif_running(netdev))
4692 be_close(netdev);
4693 rtnl_unlock();
4694
4695 be_clear(adapter);
4696 }
4697
4698 if (state == pci_channel_io_perm_failure)
4699 return PCI_ERS_RESULT_DISCONNECT;
4700
4701 pci_disable_device(pdev);
4702
4703 /* The error could cause the FW to trigger a flash debug dump.
4704 * Resetting the card while flash dump is in progress
4705 * can cause it not to recover; wait for it to finish.
4706 * Wait only for first function as it is needed only once per
4707 * adapter.
4708 */
4709 if (pdev->devfn == 0)
4710 ssleep(30);
4711
4712 return PCI_ERS_RESULT_NEED_RESET;
4713 }
4714
4715 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4716 {
4717 struct be_adapter *adapter = pci_get_drvdata(pdev);
4718 int status;
4719
4720 dev_info(&adapter->pdev->dev, "EEH reset\n");
4721
4722 status = pci_enable_device(pdev);
4723 if (status)
4724 return PCI_ERS_RESULT_DISCONNECT;
4725
4726 pci_set_master(pdev);
4727 pci_set_power_state(pdev, PCI_D0);
4728 pci_restore_state(pdev);
4729
4730 /* Check if card is ok and fw is ready */
4731 dev_info(&adapter->pdev->dev,
4732 "Waiting for FW to be ready after EEH reset\n");
4733 status = be_fw_wait_ready(adapter);
4734 if (status)
4735 return PCI_ERS_RESULT_DISCONNECT;
4736
4737 pci_cleanup_aer_uncorrect_error_status(pdev);
4738 be_clear_all_error(adapter);
4739 return PCI_ERS_RESULT_RECOVERED;
4740 }
4741
4742 static void be_eeh_resume(struct pci_dev *pdev)
4743 {
4744 int status = 0;
4745 struct be_adapter *adapter = pci_get_drvdata(pdev);
4746 struct net_device *netdev = adapter->netdev;
4747
4748 dev_info(&adapter->pdev->dev, "EEH resume\n");
4749
4750 pci_save_state(pdev);
4751
4752 status = be_cmd_reset_function(adapter);
4753 if (status)
4754 goto err;
4755
4756 /* tell fw we're ready to fire cmds */
4757 status = be_cmd_fw_init(adapter);
4758 if (status)
4759 goto err;
4760
4761 status = be_setup(adapter);
4762 if (status)
4763 goto err;
4764
4765 if (netif_running(netdev)) {
4766 status = be_open(netdev);
4767 if (status)
4768 goto err;
4769 }
4770
4771 schedule_delayed_work(&adapter->func_recovery_work,
4772 msecs_to_jiffies(1000));
4773 netif_device_attach(netdev);
4774 return;
4775 err:
4776 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4777 }
4778
4779 static const struct pci_error_handlers be_eeh_handlers = {
4780 .error_detected = be_eeh_err_detected,
4781 .slot_reset = be_eeh_reset,
4782 .resume = be_eeh_resume,
4783 };
4784
4785 static struct pci_driver be_driver = {
4786 .name = DRV_NAME,
4787 .id_table = be_dev_ids,
4788 .probe = be_probe,
4789 .remove = be_remove,
4790 .suspend = be_suspend,
4791 .resume = be_resume,
4792 .shutdown = be_shutdown,
4793 .err_handler = &be_eeh_handlers
4794 };
4795
4796 static int __init be_init_module(void)
4797 {
4798 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4799 rx_frag_size != 2048) {
4800 printk(KERN_WARNING DRV_NAME
4801 " : Module param rx_frag_size must be 2048/4096/8192."
4802 " Using 2048\n");
4803 rx_frag_size = 2048;
4804 }
4805
4806 return pci_register_driver(&be_driver);
4807 }
4808 module_init(be_init_module);
4809
4810 static void __exit be_exit_module(void)
4811 {
4812 pci_unregister_driver(&be_driver);
4813 }
4814 module_exit(be_exit_module);