be2net: ignore mac-addr set call for an already programmed mac-addr
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26
27 MODULE_VERSION(DRV_VER);
28 MODULE_DEVICE_TABLE(pci, be_dev_ids);
29 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
30 MODULE_AUTHOR("Emulex Corporation");
31 MODULE_LICENSE("GPL");
32
33 static unsigned int num_vfs;
34 module_param(num_vfs, uint, S_IRUGO);
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static ushort rx_frag_size = 2048;
38 module_param(rx_frag_size, ushort, S_IRUGO);
39 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
41 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
42 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50 { 0 }
51 };
52 MODULE_DEVICE_TABLE(pci, be_dev_ids);
53 /* UE Status Low CSR */
54 static const char * const ue_status_low_desc[] = {
55 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
83 "AXGMAC0",
84 "AXGMAC1",
85 "JTAG",
86 "MPU_INTPEND"
87 };
88 /* UE Status High CSR */
89 static const char * const ue_status_hi_desc[] = {
90 "LPCMEMHOST",
91 "MGMT_MAC",
92 "PCS0ONLINE",
93 "MPU_IRAM",
94 "PCS1ONLINE",
95 "PCTL0",
96 "PCTL1",
97 "PMEM",
98 "RR",
99 "TXPB",
100 "RXPP",
101 "XAUI",
102 "TXP",
103 "ARM",
104 "IPC",
105 "HOST2",
106 "HOST3",
107 "HOST4",
108 "HOST5",
109 "HOST6",
110 "HOST7",
111 "HOST8",
112 "HOST9",
113 "NETC",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown",
120 "Unknown",
121 "Unknown"
122 };
123
124 /* Is BE in a multi-channel mode */
125 static inline bool be_is_mc(struct be_adapter *adapter) {
126 return (adapter->function_mode & FLEX10_MODE ||
127 adapter->function_mode & VNIC_MODE ||
128 adapter->function_mode & UMC_ENABLED);
129 }
130
131 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
132 {
133 struct be_dma_mem *mem = &q->dma_mem;
134 if (mem->va) {
135 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
136 mem->dma);
137 mem->va = NULL;
138 }
139 }
140
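/* Allocate DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each and set up the queue_info bookkeeping.
 */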
141 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
142 u16 len, u16 entry_size)
143 {
144 struct be_dma_mem *mem = &q->dma_mem;
145
146 memset(q, 0, sizeof(*q));
147 q->len = len;
148 q->entry_size = entry_size;
149 mem->size = len * entry_size;
150 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
151 GFP_KERNEL);
152 if (!mem->va)
153 return -ENOMEM;
154 return 0;
155 }
156
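/* Enable/disable host interrupts by toggling the host-interrupt bit in the
 * PCI config-space MEMBAR control register (fallback used by be_intr_set()
 * when the FW command fails).
 */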
157 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
158 {
159 u32 reg, enabled;
160
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165 if (!enabled && enable)
166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else if (enabled && !enable)
168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 else
170 return;
171
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
176 static void be_intr_set(struct be_adapter *adapter, bool enable)
177 {
178 int status = 0;
179
180 /* On Lancer, interrupts can't be controlled via this register */
181 if (lancer_chip(adapter))
182 return;
183
184 if (adapter->eeh_error)
185 return;
186
187 status = be_cmd_intr_set(adapter, enable);
188 if (status)
189 be_reg_intr_set(adapter, enable);
190 }
191
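/* Ring the RQ doorbell to tell HW how many receive buffers were just posted */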
192 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
193 {
194 u32 val = 0;
195 val |= qid & DB_RQ_RING_ID_MASK;
196 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
197
198 wmb();
199 iowrite32(val, adapter->db + DB_RQ_OFFSET);
200 }
201
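/* Ring the TX doorbell for this txq with the number of WRBs just posted */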
202 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
203 u16 posted)
204 {
205 u32 val = 0;
206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
208
209 wmb();
210 iowrite32(val, adapter->db + txo->db_offset);
211 }
212
213 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
214 bool arm, bool clear_int, u16 num_popped)
215 {
216 u32 val = 0;
217 val |= qid & DB_EQ_RING_ID_MASK;
218 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
219 DB_EQ_RING_ID_EXT_MASK_SHIFT);
220
221 if (adapter->eeh_error)
222 return;
223
224 if (arm)
225 val |= 1 << DB_EQ_REARM_SHIFT;
226 if (clear_int)
227 val |= 1 << DB_EQ_CLR_SHIFT;
228 val |= 1 << DB_EQ_EVNT_SHIFT;
229 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
230 iowrite32(val, adapter->db + DB_EQ_OFFSET);
231 }
232
233 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
234 {
235 u32 val = 0;
236 val |= qid & DB_CQ_RING_ID_MASK;
237 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
238 DB_CQ_RING_ID_EXT_MASK_SHIFT);
239
240 if (adapter->eeh_error)
241 return;
242
243 if (arm)
244 val |= 1 << DB_CQ_REARM_SHIFT;
245 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
246 iowrite32(val, adapter->db + DB_CQ_OFFSET);
247 }
248
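/* Set a new MAC address on the interface. A request for the MAC that is
 * already active is ignored (returns 0 without issuing any FW commands).
 */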
249 static int be_mac_addr_set(struct net_device *netdev, void *p)
250 {
251 struct be_adapter *adapter = netdev_priv(netdev);
252 struct device *dev = &adapter->pdev->dev;
253 struct sockaddr *addr = p;
254 int status;
255 u8 mac[ETH_ALEN];
256 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
257
258 if (!is_valid_ether_addr(addr->sa_data))
259 return -EADDRNOTAVAIL;
260
261 /* Proceed further only if the user-provided MAC differs
262 * from the currently active MAC
263 */
264 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
265 return 0;
266
267 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
268 * privilege or if PF did not provision the new MAC address.
269 * On BE3, this cmd will always fail if the VF doesn't have the
270 * FILTMGMT privilege. This failure is OK, only if the PF programmed
271 * the MAC for the VF.
272 */
273 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
274 adapter->if_handle, &adapter->pmac_id[0], 0);
275 if (!status) {
276 curr_pmac_id = adapter->pmac_id[0];
277
278 /* Delete the old programmed MAC. This call may fail if the
279 * old MAC was already deleted by the PF driver.
280 */
281 if (adapter->pmac_id[0] != old_pmac_id)
282 be_cmd_pmac_del(adapter, adapter->if_handle,
283 old_pmac_id, 0);
284 }
285
286 /* Decide whether the new MAC was successfully activated only after
287 * querying the FW
288 */
289 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
290 if (status)
291 goto err;
292
293 /* The MAC change did not happen, either due to lack of privilege
294 * or because the PF did not pre-provision the new MAC.
295 */
296 if (!ether_addr_equal(addr->sa_data, mac)) {
297 status = -EPERM;
298 goto err;
299 }
300
301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
302 dev_info(dev, "MAC address changed to %pM\n", mac);
303 return 0;
304 err:
305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
306 return status;
307 }
308
309 /* BE2 supports only v0 cmd */
310 static void *hw_stats_from_cmd(struct be_adapter *adapter)
311 {
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
316 } else if (BE3_chip(adapter)) {
317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
324 }
325 }
326
327 /* BE2 supports only v0 cmd */
328 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329 {
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
334 } else if (BE3_chip(adapter)) {
335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
342 }
343 }
344
345 static void populate_be_v0_stats(struct be_adapter *adapter)
346 {
347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
350 struct be_port_rxf_stats_v0 *port_stats =
351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
353
354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
382 drvs->jabber_events = rxf_stats->port1_jabber_events;
383 else
384 drvs->jabber_events = rxf_stats->port0_jabber_events;
385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392 }
393
394 static void populate_be_v1_stats(struct be_adapter *adapter)
395 {
396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
399 struct be_port_rxf_stats_v1 *port_stats =
400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
402
403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438 }
439
440 static void populate_be_v2_stats(struct be_adapter *adapter)
441 {
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
484 if (be_roce_supported(adapter)) {
485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
492 }
493
494 static void populate_lancer_stats(struct be_adapter *adapter)
495 {
496
497 struct be_drv_stats *drvs = &adapter->drv_stats;
498 struct lancer_pport_stats *pport_stats =
499 pport_stats_from_cmd(adapter);
500
501 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
502 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
503 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
504 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
505 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
506 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
507 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
508 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
509 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
510 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
511 drvs->rx_dropped_tcp_length =
512 pport_stats->rx_dropped_invalid_tcp_length;
513 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
514 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
515 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
516 drvs->rx_dropped_header_too_small =
517 pport_stats->rx_dropped_header_too_small;
518 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
519 drvs->rx_address_filtered =
520 pport_stats->rx_address_filtered +
521 pport_stats->rx_vlan_filtered;
522 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
523 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
524 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
525 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
526 drvs->jabber_events = pport_stats->rx_jabbers;
527 drvs->forwarded_packets = pport_stats->num_forwards_lo;
528 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
529 drvs->rx_drops_too_many_frags =
530 pport_stats->rx_drops_too_many_frags_lo;
531 }
532
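/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit
 * driver counter. A wrap is detected when the new value is smaller than the
 * low 16 bits of the accumulator; e.g. *acc = 0x0001fff0, val = 0x0005
 * yields 0x00020005.
 */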
533 static void accumulate_16bit_val(u32 *acc, u16 val)
534 {
535 #define lo(x) (x & 0xFFFF)
536 #define hi(x) (x & 0xFFFF0000)
537 bool wrapped = val < lo(*acc);
538 u32 newacc = hi(*acc) + val;
539
540 if (wrapped)
541 newacc += 65536;
542 ACCESS_ONCE(*acc) = newacc;
543 }
544
545 static void populate_erx_stats(struct be_adapter *adapter,
546 struct be_rx_obj *rxo,
547 u32 erx_stat)
548 {
549 if (!BEx_chip(adapter))
550 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
551 else
552 /* the erx HW counter below can wrap around after
553 * 65535; the driver accumulates it into a 32-bit value
554 */
555 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
556 (u16)erx_stat);
557 }
558
559 void be_parse_stats(struct be_adapter *adapter)
560 {
561 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
562 struct be_rx_obj *rxo;
563 int i;
564 u32 erx_stat;
565
566 if (lancer_chip(adapter)) {
567 populate_lancer_stats(adapter);
568 } else {
569 if (BE2_chip(adapter))
570 populate_be_v0_stats(adapter);
571 else if (BE3_chip(adapter))
572 /* for BE3 */
573 populate_be_v1_stats(adapter);
574 else
575 populate_be_v2_stats(adapter);
576
577 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
578 for_all_rx_queues(adapter, rxo, i) {
579 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
580 populate_erx_stats(adapter, rxo, erx_stat);
581 }
582 }
583 }
584
585 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
586 struct rtnl_link_stats64 *stats)
587 {
588 struct be_adapter *adapter = netdev_priv(netdev);
589 struct be_drv_stats *drvs = &adapter->drv_stats;
590 struct be_rx_obj *rxo;
591 struct be_tx_obj *txo;
592 u64 pkts, bytes;
593 unsigned int start;
594 int i;
595
596 for_all_rx_queues(adapter, rxo, i) {
597 const struct be_rx_stats *rx_stats = rx_stats(rxo);
598 do {
599 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
600 pkts = rx_stats(rxo)->rx_pkts;
601 bytes = rx_stats(rxo)->rx_bytes;
602 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
603 stats->rx_packets += pkts;
604 stats->rx_bytes += bytes;
605 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
606 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
607 rx_stats(rxo)->rx_drops_no_frags;
608 }
609
610 for_all_tx_queues(adapter, txo, i) {
611 const struct be_tx_stats *tx_stats = tx_stats(txo);
612 do {
613 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
614 pkts = tx_stats(txo)->tx_pkts;
615 bytes = tx_stats(txo)->tx_bytes;
616 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
617 stats->tx_packets += pkts;
618 stats->tx_bytes += bytes;
619 }
620
621 /* bad pkts received */
622 stats->rx_errors = drvs->rx_crc_errors +
623 drvs->rx_alignment_symbol_errors +
624 drvs->rx_in_range_errors +
625 drvs->rx_out_range_errors +
626 drvs->rx_frame_too_long +
627 drvs->rx_dropped_too_small +
628 drvs->rx_dropped_too_short +
629 drvs->rx_dropped_header_too_small +
630 drvs->rx_dropped_tcp_length +
631 drvs->rx_dropped_runt;
632
633 /* detailed rx errors */
634 stats->rx_length_errors = drvs->rx_in_range_errors +
635 drvs->rx_out_range_errors +
636 drvs->rx_frame_too_long;
637
638 stats->rx_crc_errors = drvs->rx_crc_errors;
639
640 /* frame alignment errors */
641 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
642
643 /* receiver fifo overrun */
644 /* drops_no_pbuf is not per i/f, it's per BE card */
645 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
646 drvs->rx_input_fifo_overflow_drop +
647 drvs->rx_drops_no_pbuf;
648 return stats;
649 }
650
651 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
652 {
653 struct net_device *netdev = adapter->netdev;
654
655 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
656 netif_carrier_off(netdev);
657 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
658 }
659
660 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
661 netif_carrier_on(netdev);
662 else
663 netif_carrier_off(netdev);
664 }
665
666 static void be_tx_stats_update(struct be_tx_obj *txo,
667 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
668 {
669 struct be_tx_stats *stats = tx_stats(txo);
670
671 u64_stats_update_begin(&stats->sync);
672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
676 if (stopped)
677 stats->tx_stops++;
678 u64_stats_update_end(&stats->sync);
679 }
680
681 /* Determine number of WRB entries needed to xmit data in an skb */
682 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
683 bool *dummy)
684 {
685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
689 /* to account for hdr wrb */
690 cnt++;
691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
697 }
698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700 }
701
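/* Fill a TX WRB with the DMA address and length of a single fragment */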
702 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703 {
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
707 wrb->rsvd0 = 0;
708 }
709
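/* Return the VLAN tag to place in the TX WRB; if the priority supplied by
 * the stack is not in the available priority bitmap, substitute the
 * adapter's recommended priority.
 */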
710 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
711 struct sk_buff *skb)
712 {
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724 }
725
726 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
727 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
728 {
729 u16 vlan_tag;
730
731 memset(hdr, 0, sizeof(*hdr));
732
733 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
734
735 if (skb_is_gso(skb)) {
736 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
737 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
738 hdr, skb_shinfo(skb)->gso_size);
739 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
741 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
742 if (is_tcp_pkt(skb))
743 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
744 else if (is_udp_pkt(skb))
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
746 }
747
748 if (vlan_tx_tag_present(skb)) {
749 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
750 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
752 }
753
754 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
755 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
756 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
757 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
758 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
759 }
760
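/* DMA-unmap the fragment described by a completed TX WRB */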
761 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
762 bool unmap_single)
763 {
764 dma_addr_t dma;
765
766 be_dws_le_to_cpu(wrb, sizeof(*wrb));
767
768 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
769 if (wrb->frag_len) {
770 if (unmap_single)
771 dma_unmap_single(dev, dma, wrb->frag_len,
772 DMA_TO_DEVICE);
773 else
774 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
775 }
776 }
777
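/* Map the skb's header and fragments and fill the TX WRBs (plus an optional
 * dummy WRB). Returns the number of bytes queued, or 0 after unwinding all
 * mappings if a DMA mapping error occurs.
 */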
778 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
779 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
780 bool skip_hw_vlan)
781 {
782 dma_addr_t busaddr;
783 int i, copied = 0;
784 struct device *dev = &adapter->pdev->dev;
785 struct sk_buff *first_skb = skb;
786 struct be_eth_wrb *wrb;
787 struct be_eth_hdr_wrb *hdr;
788 bool map_single = false;
789 u16 map_head;
790
791 hdr = queue_head_node(txq);
792 queue_head_inc(txq);
793 map_head = txq->head;
794
795 if (skb->len > skb->data_len) {
796 int len = skb_headlen(skb);
797 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
798 if (dma_mapping_error(dev, busaddr))
799 goto dma_err;
800 map_single = true;
801 wrb = queue_head_node(txq);
802 wrb_fill(wrb, busaddr, len);
803 be_dws_cpu_to_le(wrb, sizeof(*wrb));
804 queue_head_inc(txq);
805 copied += len;
806 }
807
808 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
809 const struct skb_frag_struct *frag =
810 &skb_shinfo(skb)->frags[i];
811 busaddr = skb_frag_dma_map(dev, frag, 0,
812 skb_frag_size(frag), DMA_TO_DEVICE);
813 if (dma_mapping_error(dev, busaddr))
814 goto dma_err;
815 wrb = queue_head_node(txq);
816 wrb_fill(wrb, busaddr, skb_frag_size(frag));
817 be_dws_cpu_to_le(wrb, sizeof(*wrb));
818 queue_head_inc(txq);
819 copied += skb_frag_size(frag);
820 }
821
822 if (dummy_wrb) {
823 wrb = queue_head_node(txq);
824 wrb_fill(wrb, 0, 0);
825 be_dws_cpu_to_le(wrb, sizeof(*wrb));
826 queue_head_inc(txq);
827 }
828
829 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
830 be_dws_cpu_to_le(hdr, sizeof(*hdr));
831
832 return copied;
833 dma_err:
834 txq->head = map_head;
835 while (copied) {
836 wrb = queue_head_node(txq);
837 unmap_tx_frag(dev, wrb, map_single);
838 map_single = false;
839 copied -= wrb->frag_len;
840 queue_head_inc(txq);
841 }
842 return 0;
843 }
844
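/* Insert the VLAN tag (and the outer QnQ tag, if configured) directly into
 * the packet data so that HW VLAN tagging can be skipped.
 */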
845 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
846 struct sk_buff *skb,
847 bool *skip_hw_vlan)
848 {
849 u16 vlan_tag = 0;
850
851 skb = skb_share_check(skb, GFP_ATOMIC);
852 if (unlikely(!skb))
853 return skb;
854
855 if (vlan_tx_tag_present(skb))
856 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
857
858 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
859 if (!vlan_tag)
860 vlan_tag = adapter->pvid;
861 /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
862 * skip VLAN insertion
863 */
864 if (skip_hw_vlan)
865 *skip_hw_vlan = true;
866 }
867
868 if (vlan_tag) {
869 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
870 if (unlikely(!skb))
871 return skb;
872 skb->vlan_tci = 0;
873 }
874
875 /* Insert the outer VLAN, if any */
876 if (adapter->qnq_vid) {
877 vlan_tag = adapter->qnq_vid;
878 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
879 if (unlikely(!skb))
880 return skb;
881 if (skip_hw_vlan)
882 *skip_hw_vlan = true;
883 }
884
885 return skb;
886 }
887
888 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
889 {
890 struct ethhdr *eh = (struct ethhdr *)skb->data;
891 u16 offset = ETH_HLEN;
892
893 if (eh->h_proto == htons(ETH_P_IPV6)) {
894 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
895
896 offset += sizeof(struct ipv6hdr);
897 if (ip6h->nexthdr != NEXTHDR_TCP &&
898 ip6h->nexthdr != NEXTHDR_UDP) {
899 struct ipv6_opt_hdr *ehdr =
900 (struct ipv6_opt_hdr *) (skb->data + offset);
901
902 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
903 if (ehdr->hdrlen == 0xff)
904 return true;
905 }
906 }
907 return false;
908 }
909
910 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
911 {
912 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
913 }
914
915 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
916 struct sk_buff *skb)
917 {
918 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
919 }
920
921 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
922 struct sk_buff *skb,
923 bool *skip_hw_vlan)
924 {
925 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
926 unsigned int eth_hdr_len;
927 struct iphdr *ip;
928
929 /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes or less
930 * may cause a transmit stall on that port. The work-around is to
931 * pad such short packets (<= 32 bytes) to a 36-byte length.
932 */
933 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
934 if (skb_padto(skb, 36))
935 goto tx_drop;
936 skb->len = 36;
937 }
938
939 /* For padded packets, BE HW incorrectly modifies the tot_len field in the
940 * IP header when a VLAN tag is inserted by HW.
941 * For padded packets, Lancer computes an incorrect checksum.
942 */
943 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
944 VLAN_ETH_HLEN : ETH_HLEN;
945 if (skb->len <= 60 &&
946 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
947 is_ipv4_pkt(skb)) {
948 ip = (struct iphdr *)ip_hdr(skb);
949 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
950 }
951
952 /* If vlan tag is already inlined in the packet, skip HW VLAN
953 * tagging in UMC mode
954 */
955 if ((adapter->function_mode & UMC_ENABLED) &&
956 veh->h_vlan_proto == htons(ETH_P_8021Q))
957 *skip_hw_vlan = true;
958
959 /* HW has a bug wherein it will calculate CSUM for VLAN
960 * pkts even when checksum offload is disabled.
961 * Manually insert the VLAN tag in the pkt instead.
962 */
963 if (skb->ip_summed != CHECKSUM_PARTIAL &&
964 vlan_tx_tag_present(skb)) {
965 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
966 if (unlikely(!skb))
967 goto tx_drop;
968 }
969
970 /* HW may lockup when VLAN HW tagging is requested on
971 * certain ipv6 packets. Drop such pkts if the HW workaround to
972 * skip HW tagging is not enabled by FW.
973 */
974 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
975 (adapter->pvid || adapter->qnq_vid) &&
976 !qnq_async_evt_rcvd(adapter)))
977 goto tx_drop;
978
979 /* Manual VLAN tag insertion to prevent:
980 * ASIC lockup when the ASIC inserts VLAN tag into
981 * certain ipv6 packets. Insert VLAN tags in driver,
982 * and set event, completion, vlan bits accordingly
983 * in the Tx WRB.
984 */
985 if (be_ipv6_tx_stall_chk(adapter, skb) &&
986 be_vlan_tag_tx_chk(adapter, skb)) {
987 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
988 if (unlikely(!skb))
989 goto tx_drop;
990 }
991
992 return skb;
993 tx_drop:
994 dev_kfree_skb_any(skb);
995 return NULL;
996 }
997
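/* Main transmit entry point: apply the HW workarounds, build the WRBs,
 * stop the queue if it is close to full and ring the TX doorbell.
 */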
998 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
999 {
1000 struct be_adapter *adapter = netdev_priv(netdev);
1001 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1002 struct be_queue_info *txq = &txo->q;
1003 bool dummy_wrb, stopped = false;
1004 u32 wrb_cnt = 0, copied = 0;
1005 bool skip_hw_vlan = false;
1006 u32 start = txq->head;
1007
1008 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1009 if (!skb) {
1010 tx_stats(txo)->tx_drv_drops++;
1011 return NETDEV_TX_OK;
1012 }
1013
1014 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1015
1016 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1017 skip_hw_vlan);
1018 if (copied) {
1019 int gso_segs = skb_shinfo(skb)->gso_segs;
1020
1021 /* record the sent skb in the sent_skb table */
1022 BUG_ON(txo->sent_skb_list[start]);
1023 txo->sent_skb_list[start] = skb;
1024
1025 /* Ensure txq has space for the next skb; else stop the queue
1026 * *BEFORE* ringing the tx doorbell, so that we serialize the
1027 * tx compls of the current transmit which will wake up the queue
1028 */
1029 atomic_add(wrb_cnt, &txq->used);
1030 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1031 txq->len) {
1032 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1033 stopped = true;
1034 }
1035
1036 be_txq_notify(adapter, txo, wrb_cnt);
1037
1038 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1039 } else {
1040 txq->head = start;
1041 tx_stats(txo)->tx_drv_drops++;
1042 dev_kfree_skb_any(skb);
1043 }
1044 return NETDEV_TX_OK;
1045 }
1046
1047 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1048 {
1049 struct be_adapter *adapter = netdev_priv(netdev);
1050 if (new_mtu < BE_MIN_MTU ||
1051 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1052 (ETH_HLEN + ETH_FCS_LEN))) {
1053 dev_info(&adapter->pdev->dev,
1054 "MTU must be between %d and %d bytes\n",
1055 BE_MIN_MTU,
1056 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1057 return -EINVAL;
1058 }
1059 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1060 netdev->mtu, new_mtu);
1061 netdev->mtu = new_mtu;
1062 return 0;
1063 }
1064
1065 /*
1066 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1067 * If the user configures more, place BE in vlan promiscuous mode.
1068 */
1069 static int be_vid_config(struct be_adapter *adapter)
1070 {
1071 u16 vids[BE_NUM_VLANS_SUPPORTED];
1072 u16 num = 0, i;
1073 int status = 0;
1074
1075 /* No need to further configure vids if in promiscuous mode */
1076 if (adapter->promiscuous)
1077 return 0;
1078
1079 if (adapter->vlans_added > be_max_vlans(adapter))
1080 goto set_vlan_promisc;
1081
1082 /* Construct VLAN Table to give to HW */
1083 for (i = 0; i < VLAN_N_VID; i++)
1084 if (adapter->vlan_tag[i])
1085 vids[num++] = cpu_to_le16(i);
1086
1087 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1088 vids, num, 0);
1089
1090 if (status) {
1091 /* Set to VLAN promisc mode as setting VLAN filter failed */
1092 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1093 goto set_vlan_promisc;
1094 dev_err(&adapter->pdev->dev,
1095 "Setting HW VLAN filtering failed.\n");
1096 } else {
1097 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1098 /* hw VLAN filtering re-enabled. */
1099 status = be_cmd_rx_filter(adapter,
1100 BE_FLAGS_VLAN_PROMISC, OFF);
1101 if (!status) {
1102 dev_info(&adapter->pdev->dev,
1103 "Disabling VLAN Promiscuous mode.\n");
1104 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1105 dev_info(&adapter->pdev->dev,
1106 "Re-Enabling HW VLAN filtering\n");
1107 }
1108 }
1109 }
1110
1111 return status;
1112
1113 set_vlan_promisc:
1114 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1115
1116 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1117 if (!status) {
1118 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1119 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1120 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1121 } else
1122 dev_err(&adapter->pdev->dev,
1123 "Failed to enable VLAN Promiscuous mode.\n");
1124 return status;
1125 }
1126
1127 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1128 {
1129 struct be_adapter *adapter = netdev_priv(netdev);
1130 int status = 0;
1131
1132
1133 /* Packets with VID 0 are always received by Lancer by default */
1134 if (lancer_chip(adapter) && vid == 0)
1135 goto ret;
1136
1137 adapter->vlan_tag[vid] = 1;
1138 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1139 status = be_vid_config(adapter);
1140
1141 if (!status)
1142 adapter->vlans_added++;
1143 else
1144 adapter->vlan_tag[vid] = 0;
1145 ret:
1146 return status;
1147 }
1148
1149 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1150 {
1151 struct be_adapter *adapter = netdev_priv(netdev);
1152 int status = 0;
1153
1154 /* Packets with VID 0 are always received by Lancer by default */
1155 if (lancer_chip(adapter) && vid == 0)
1156 goto ret;
1157
1158 adapter->vlan_tag[vid] = 0;
1159 if (adapter->vlans_added <= be_max_vlans(adapter))
1160 status = be_vid_config(adapter);
1161
1162 if (!status)
1163 adapter->vlans_added--;
1164 else
1165 adapter->vlan_tag[vid] = 1;
1166 ret:
1167 return status;
1168 }
1169
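/* Sync the RX filters (promiscuous, multicast and unicast MAC lists) with
 * the netdev flags and address lists.
 */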
1170 static void be_set_rx_mode(struct net_device *netdev)
1171 {
1172 struct be_adapter *adapter = netdev_priv(netdev);
1173 int status;
1174
1175 if (netdev->flags & IFF_PROMISC) {
1176 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1177 adapter->promiscuous = true;
1178 goto done;
1179 }
1180
1181 /* BE was previously in promiscuous mode; disable it */
1182 if (adapter->promiscuous) {
1183 adapter->promiscuous = false;
1184 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1185
1186 if (adapter->vlans_added)
1187 be_vid_config(adapter);
1188 }
1189
1190 /* Enable multicast promisc if num configured exceeds what we support */
1191 if (netdev->flags & IFF_ALLMULTI ||
1192 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1193 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1194 goto done;
1195 }
1196
1197 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1198 struct netdev_hw_addr *ha;
1199 int i = 1; /* First slot is claimed by the Primary MAC */
1200
1201 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1202 be_cmd_pmac_del(adapter, adapter->if_handle,
1203 adapter->pmac_id[i], 0);
1204 }
1205
1206 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1207 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1208 adapter->promiscuous = true;
1209 goto done;
1210 }
1211
1212 netdev_for_each_uc_addr(ha, adapter->netdev) {
1213 adapter->uc_macs++; /* First slot is for Primary MAC */
1214 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1215 adapter->if_handle,
1216 &adapter->pmac_id[adapter->uc_macs], 0);
1217 }
1218 }
1219
1220 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1221
1222 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1223 if (status) {
1224 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1225 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1226 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1227 }
1228 done:
1229 return;
1230 }
1231
1232 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1233 {
1234 struct be_adapter *adapter = netdev_priv(netdev);
1235 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1236 int status;
1237
1238 if (!sriov_enabled(adapter))
1239 return -EPERM;
1240
1241 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1242 return -EINVAL;
1243
1244 if (BEx_chip(adapter)) {
1245 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1246 vf + 1);
1247
1248 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1249 &vf_cfg->pmac_id, vf + 1);
1250 } else {
1251 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1252 vf + 1);
1253 }
1254
1255 if (status)
1256 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1257 mac, vf);
1258 else
1259 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1260
1261 return status;
1262 }
1263
1264 static int be_get_vf_config(struct net_device *netdev, int vf,
1265 struct ifla_vf_info *vi)
1266 {
1267 struct be_adapter *adapter = netdev_priv(netdev);
1268 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1269
1270 if (!sriov_enabled(adapter))
1271 return -EPERM;
1272
1273 if (vf >= adapter->num_vfs)
1274 return -EINVAL;
1275
1276 vi->vf = vf;
1277 vi->tx_rate = vf_cfg->tx_rate;
1278 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1279 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1280 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1281
1282 return 0;
1283 }
1284
1285 static int be_set_vf_vlan(struct net_device *netdev,
1286 int vf, u16 vlan, u8 qos)
1287 {
1288 struct be_adapter *adapter = netdev_priv(netdev);
1289 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1290 int status = 0;
1291
1292 if (!sriov_enabled(adapter))
1293 return -EPERM;
1294
1295 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1296 return -EINVAL;
1297
1298 if (vlan || qos) {
1299 vlan |= qos << VLAN_PRIO_SHIFT;
1300 if (vf_cfg->vlan_tag != vlan) {
1301 /* If this is a new value, program it; else skip. */
1302 vf_cfg->vlan_tag = vlan;
1303 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1304 vf_cfg->if_handle, 0);
1305 }
1306 } else {
1307 /* Reset Transparent Vlan Tagging. */
1308 vf_cfg->vlan_tag = 0;
1309 vlan = vf_cfg->def_vid;
1310 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1311 vf_cfg->if_handle, 0);
1312 }
1313
1314
1315 if (status)
1316 dev_info(&adapter->pdev->dev,
1317 "VLAN %d config on VF %d failed\n", vlan, vf);
1318 return status;
1319 }
1320
1321 static int be_set_vf_tx_rate(struct net_device *netdev,
1322 int vf, int rate)
1323 {
1324 struct be_adapter *adapter = netdev_priv(netdev);
1325 int status = 0;
1326
1327 if (!sriov_enabled(adapter))
1328 return -EPERM;
1329
1330 if (vf >= adapter->num_vfs)
1331 return -EINVAL;
1332
1333 if (rate < 100 || rate > 10000) {
1334 dev_err(&adapter->pdev->dev,
1335 "tx rate must be between 100 and 10000 Mbps\n");
1336 return -EINVAL;
1337 }
1338
1339 if (lancer_chip(adapter))
1340 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1341 else
1342 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1343
1344 if (status)
1345 dev_err(&adapter->pdev->dev,
1346 "tx rate %d on VF %d failed\n", rate, vf);
1347 else
1348 adapter->vf_cfg[vf].tx_rate = rate;
1349 return status;
1350 }
1351
1352 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1353 ulong now)
1354 {
1355 aic->rx_pkts_prev = rx_pkts;
1356 aic->tx_reqs_prev = tx_pkts;
1357 aic->jiffies = now;
1358 }
1359
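/* Adaptive interrupt coalescing: derive each EQ's delay from the observed
 * rx/tx packet rate (eqd = (pps / 15000) << 2, clamped to the min/max
 * limits; e.g. ~120K pkts/s maps to eqd = 32) and push any changed values
 * to the FW.
 */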
1360 static void be_eqd_update(struct be_adapter *adapter)
1361 {
1362 struct be_set_eqd set_eqd[MAX_EVT_QS];
1363 int eqd, i, num = 0, start;
1364 struct be_aic_obj *aic;
1365 struct be_eq_obj *eqo;
1366 struct be_rx_obj *rxo;
1367 struct be_tx_obj *txo;
1368 u64 rx_pkts, tx_pkts;
1369 ulong now;
1370 u32 pps, delta;
1371
1372 for_all_evt_queues(adapter, eqo, i) {
1373 aic = &adapter->aic_obj[eqo->idx];
1374 if (!aic->enable) {
1375 if (aic->jiffies)
1376 aic->jiffies = 0;
1377 eqd = aic->et_eqd;
1378 goto modify_eqd;
1379 }
1380
1381 rxo = &adapter->rx_obj[eqo->idx];
1382 do {
1383 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1384 rx_pkts = rxo->stats.rx_pkts;
1385 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1386
1387 txo = &adapter->tx_obj[eqo->idx];
1388 do {
1389 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1390 tx_pkts = txo->stats.tx_reqs;
1391 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1392
1393
1394 /* Skip, if wrapped around or first calculation */
1395 now = jiffies;
1396 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1397 rx_pkts < aic->rx_pkts_prev ||
1398 tx_pkts < aic->tx_reqs_prev) {
1399 be_aic_update(aic, rx_pkts, tx_pkts, now);
1400 continue;
1401 }
1402
1403 delta = jiffies_to_msecs(now - aic->jiffies);
1404 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1405 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1406 eqd = (pps / 15000) << 2;
1407
1408 if (eqd < 8)
1409 eqd = 0;
1410 eqd = min_t(u32, eqd, aic->max_eqd);
1411 eqd = max_t(u32, eqd, aic->min_eqd);
1412
1413 be_aic_update(aic, rx_pkts, tx_pkts, now);
1414 modify_eqd:
1415 if (eqd != aic->prev_eqd) {
1416 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1417 set_eqd[num].eq_id = eqo->q.id;
1418 aic->prev_eqd = eqd;
1419 num++;
1420 }
1421 }
1422
1423 if (num)
1424 be_cmd_modify_eqd(adapter, set_eqd, num);
1425 }
1426
1427 static void be_rx_stats_update(struct be_rx_obj *rxo,
1428 struct be_rx_compl_info *rxcp)
1429 {
1430 struct be_rx_stats *stats = rx_stats(rxo);
1431
1432 u64_stats_update_begin(&stats->sync);
1433 stats->rx_compl++;
1434 stats->rx_bytes += rxcp->pkt_size;
1435 stats->rx_pkts++;
1436 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1437 stats->rx_mcast_pkts++;
1438 if (rxcp->err)
1439 stats->rx_compl_err++;
1440 u64_stats_update_end(&stats->sync);
1441 }
1442
1443 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1444 {
1445 /* L4 checksum is not reliable for non-TCP/UDP packets;
1446 * also ignore ipcksm for ipv6 pkts */
1447 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1448 (rxcp->ip_csum || rxcp->ipv6);
1449 }
1450
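/* Return the page_info for the given rxq fragment index, unmapping the
 * underlying page when its last user is being consumed.
 */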
1451 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1452 u16 frag_idx)
1453 {
1454 struct be_adapter *adapter = rxo->adapter;
1455 struct be_rx_page_info *rx_page_info;
1456 struct be_queue_info *rxq = &rxo->q;
1457
1458 rx_page_info = &rxo->page_info_tbl[frag_idx];
1459 BUG_ON(!rx_page_info->page);
1460
1461 if (rx_page_info->last_page_user) {
1462 dma_unmap_page(&adapter->pdev->dev,
1463 dma_unmap_addr(rx_page_info, bus),
1464 adapter->big_page_size, DMA_FROM_DEVICE);
1465 rx_page_info->last_page_user = false;
1466 }
1467
1468 atomic_dec(&rxq->used);
1469 return rx_page_info;
1470 }
1471
1472 /* Throw away the data in the Rx completion */
1473 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1474 struct be_rx_compl_info *rxcp)
1475 {
1476 struct be_queue_info *rxq = &rxo->q;
1477 struct be_rx_page_info *page_info;
1478 u16 i, num_rcvd = rxcp->num_rcvd;
1479
1480 for (i = 0; i < num_rcvd; i++) {
1481 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1482 put_page(page_info->page);
1483 memset(page_info, 0, sizeof(*page_info));
1484 index_inc(&rxcp->rxq_idx, rxq->len);
1485 }
1486 }
1487
1488 /*
1489 * skb_fill_rx_data forms a complete skb for an ether frame
1490 * indicated by rxcp.
1491 */
1492 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1493 struct be_rx_compl_info *rxcp)
1494 {
1495 struct be_queue_info *rxq = &rxo->q;
1496 struct be_rx_page_info *page_info;
1497 u16 i, j;
1498 u16 hdr_len, curr_frag_len, remaining;
1499 u8 *start;
1500
1501 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1502 start = page_address(page_info->page) + page_info->page_offset;
1503 prefetch(start);
1504
1505 /* Copy data in the first descriptor of this completion */
1506 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1507
1508 skb->len = curr_frag_len;
1509 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1510 memcpy(skb->data, start, curr_frag_len);
1511 /* Complete packet has now been moved to data */
1512 put_page(page_info->page);
1513 skb->data_len = 0;
1514 skb->tail += curr_frag_len;
1515 } else {
1516 hdr_len = ETH_HLEN;
1517 memcpy(skb->data, start, hdr_len);
1518 skb_shinfo(skb)->nr_frags = 1;
1519 skb_frag_set_page(skb, 0, page_info->page);
1520 skb_shinfo(skb)->frags[0].page_offset =
1521 page_info->page_offset + hdr_len;
1522 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1523 skb->data_len = curr_frag_len - hdr_len;
1524 skb->truesize += rx_frag_size;
1525 skb->tail += hdr_len;
1526 }
1527 page_info->page = NULL;
1528
1529 if (rxcp->pkt_size <= rx_frag_size) {
1530 BUG_ON(rxcp->num_rcvd != 1);
1531 return;
1532 }
1533
1534 /* More frags present for this completion */
1535 index_inc(&rxcp->rxq_idx, rxq->len);
1536 remaining = rxcp->pkt_size - curr_frag_len;
1537 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1538 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1539 curr_frag_len = min(remaining, rx_frag_size);
1540
1541 /* Coalesce all frags from the same physical page in one slot */
1542 if (page_info->page_offset == 0) {
1543 /* Fresh page */
1544 j++;
1545 skb_frag_set_page(skb, j, page_info->page);
1546 skb_shinfo(skb)->frags[j].page_offset =
1547 page_info->page_offset;
1548 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1549 skb_shinfo(skb)->nr_frags++;
1550 } else {
1551 put_page(page_info->page);
1552 }
1553
1554 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1555 skb->len += curr_frag_len;
1556 skb->data_len += curr_frag_len;
1557 skb->truesize += rx_frag_size;
1558 remaining -= curr_frag_len;
1559 index_inc(&rxcp->rxq_idx, rxq->len);
1560 page_info->page = NULL;
1561 }
1562 BUG_ON(j > MAX_SKB_FRAGS);
1563 }
1564
1565 /* Process the RX completion indicated by rxcp when GRO is disabled */
1566 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1567 struct be_rx_compl_info *rxcp)
1568 {
1569 struct be_adapter *adapter = rxo->adapter;
1570 struct net_device *netdev = adapter->netdev;
1571 struct sk_buff *skb;
1572
1573 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1574 if (unlikely(!skb)) {
1575 rx_stats(rxo)->rx_drops_no_skbs++;
1576 be_rx_compl_discard(rxo, rxcp);
1577 return;
1578 }
1579
1580 skb_fill_rx_data(rxo, skb, rxcp);
1581
1582 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1583 skb->ip_summed = CHECKSUM_UNNECESSARY;
1584 else
1585 skb_checksum_none_assert(skb);
1586
1587 skb->protocol = eth_type_trans(skb, netdev);
1588 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1589 if (netdev->features & NETIF_F_RXHASH)
1590 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1591 skb_mark_napi_id(skb, napi);
1592
1593 if (rxcp->vlanf)
1594 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1595
1596 netif_receive_skb(skb);
1597 }
1598
1599 /* Process the RX completion indicated by rxcp when GRO is enabled */
1600 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1601 struct napi_struct *napi,
1602 struct be_rx_compl_info *rxcp)
1603 {
1604 struct be_adapter *adapter = rxo->adapter;
1605 struct be_rx_page_info *page_info;
1606 struct sk_buff *skb = NULL;
1607 struct be_queue_info *rxq = &rxo->q;
1608 u16 remaining, curr_frag_len;
1609 u16 i, j;
1610
1611 skb = napi_get_frags(napi);
1612 if (!skb) {
1613 be_rx_compl_discard(rxo, rxcp);
1614 return;
1615 }
1616
1617 remaining = rxcp->pkt_size;
1618 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1619 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1620
1621 curr_frag_len = min(remaining, rx_frag_size);
1622
1623 /* Coalesce all frags from the same physical page in one slot */
1624 if (i == 0 || page_info->page_offset == 0) {
1625 /* First frag or Fresh page */
1626 j++;
1627 skb_frag_set_page(skb, j, page_info->page);
1628 skb_shinfo(skb)->frags[j].page_offset =
1629 page_info->page_offset;
1630 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1631 } else {
1632 put_page(page_info->page);
1633 }
1634 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1635 skb->truesize += rx_frag_size;
1636 remaining -= curr_frag_len;
1637 index_inc(&rxcp->rxq_idx, rxq->len);
1638 memset(page_info, 0, sizeof(*page_info));
1639 }
1640 BUG_ON(j > MAX_SKB_FRAGS);
1641
1642 skb_shinfo(skb)->nr_frags = j + 1;
1643 skb->len = rxcp->pkt_size;
1644 skb->data_len = rxcp->pkt_size;
1645 skb->ip_summed = CHECKSUM_UNNECESSARY;
1646 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1647 if (adapter->netdev->features & NETIF_F_RXHASH)
1648 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1649 skb_mark_napi_id(skb, napi);
1650
1651 if (rxcp->vlanf)
1652 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1653
1654 napi_gro_frags(napi);
1655 }
1656
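/* Extract the fields of a v1 RX completion into the common rxcp struct */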
1657 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1658 struct be_rx_compl_info *rxcp)
1659 {
1660 rxcp->pkt_size =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1662 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1663 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1664 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1665 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1666 rxcp->ip_csum =
1667 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1668 rxcp->l4_csum =
1669 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1670 rxcp->ipv6 =
1671 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1672 rxcp->rxq_idx =
1673 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1674 rxcp->num_rcvd =
1675 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1676 rxcp->pkt_type =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1678 rxcp->rss_hash =
1679 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1680 if (rxcp->vlanf) {
1681 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1682 compl);
1683 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1684 compl);
1685 }
1686 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1687 }
1688
1689 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1690 struct be_rx_compl_info *rxcp)
1691 {
1692 rxcp->pkt_size =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1694 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1695 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1696 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1697 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1698 rxcp->ip_csum =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1700 rxcp->l4_csum =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1702 rxcp->ipv6 =
1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1704 rxcp->rxq_idx =
1705 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1706 rxcp->num_rcvd =
1707 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1708 rxcp->pkt_type =
1709 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1710 rxcp->rss_hash =
1711 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1712 if (rxcp->vlanf) {
1713 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1714 compl);
1715 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1716 compl);
1717 }
1718 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1719 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1720 ip_frag, compl);
1721 }
1722
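/* Fetch and parse the next valid RX completion from the CQ; returns NULL
 * if none is pending.
 */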
1723 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1724 {
1725 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1726 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1727 struct be_adapter *adapter = rxo->adapter;
1728
1729 /* For checking the valid bit it is Ok to use either definition as the
1730 * valid bit is at the same position in both v0 and v1 Rx compl */
1731 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1732 return NULL;
1733
1734 rmb();
1735 be_dws_le_to_cpu(compl, sizeof(*compl));
1736
1737 if (adapter->be3_native)
1738 be_parse_rx_compl_v1(compl, rxcp);
1739 else
1740 be_parse_rx_compl_v0(compl, rxcp);
1741
1742 if (rxcp->ip_frag)
1743 rxcp->l4_csum = 0;
1744
1745 if (rxcp->vlanf) {
1746 /* vlanf could be wrongly set in some cards.
1747 * ignore if vtm is not set */
1748 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1749 rxcp->vlanf = 0;
1750
1751 if (!lancer_chip(adapter))
1752 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1753
1754 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1755 !adapter->vlan_tag[rxcp->vlan_tag])
1756 rxcp->vlanf = 0;
1757 }
1758
1759 /* As the compl has been parsed, reset it; we won't touch it again */
1760 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1761
1762 queue_tail_inc(&rxo->cq);
1763 return rxcp;
1764 }
1765
1766 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1767 {
1768 u32 order = get_order(size);
1769
1770 if (order > 0)
1771 gfp |= __GFP_COMP;
1772 return alloc_pages(gfp, order);
1773 }
1774
1775 /*
1776 * Allocate a page, split it into fragments of size rx_frag_size and post as
1777 * receive buffers to BE
1778 */
1779 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1780 {
1781 struct be_adapter *adapter = rxo->adapter;
1782 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1783 struct be_queue_info *rxq = &rxo->q;
1784 struct page *pagep = NULL;
1785 struct be_eth_rx_d *rxd;
1786 u64 page_dmaaddr = 0, frag_dmaaddr;
1787 u32 posted, page_offset = 0;
1788
1789 page_info = &rxo->page_info_tbl[rxq->head];
1790 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1791 if (!pagep) {
1792 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1793 if (unlikely(!pagep)) {
1794 rx_stats(rxo)->rx_post_fail++;
1795 break;
1796 }
1797 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1798 0, adapter->big_page_size,
1799 DMA_FROM_DEVICE);
1800 page_info->page_offset = 0;
1801 } else {
1802 get_page(pagep);
1803 page_info->page_offset = page_offset + rx_frag_size;
1804 }
1805 page_offset = page_info->page_offset;
1806 page_info->page = pagep;
1807 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1808 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1809
1810 rxd = queue_head_node(rxq);
1811 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1812 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1813
1814 /* Any space left in the current big page for another frag? */
1815 if ((page_offset + rx_frag_size + rx_frag_size) >
1816 adapter->big_page_size) {
1817 pagep = NULL;
1818 page_info->last_page_user = true;
1819 }
1820
1821 prev_page_info = page_info;
1822 queue_head_inc(rxq);
1823 page_info = &rxo->page_info_tbl[rxq->head];
1824 }
1825 if (pagep)
1826 prev_page_info->last_page_user = true;
1827
1828 if (posted) {
1829 atomic_add(posted, &rxq->used);
1830 if (rxo->rx_post_starved)
1831 rxo->rx_post_starved = false;
1832 be_rxq_notify(adapter, rxq->id, posted);
1833 } else if (atomic_read(&rxq->used) == 0) {
1834 /* Let be_worker replenish when memory is available */
1835 rxo->rx_post_starved = true;
1836 }
1837 }
1838
1839 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1840 {
1841 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1842
1843 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1844 return NULL;
1845
1846 rmb();
1847 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1848
1849 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1850
1851 queue_tail_inc(tx_cq);
1852 return txcp;
1853 }
1854
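/* Unmap the wrbs of the TX request starting at the queue tail and ending
 * at last_index, and free its skb. Returns the number of wrbs consumed
 * (including the header wrb) so the caller can adjust txq->used.
 */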
1855 static u16 be_tx_compl_process(struct be_adapter *adapter,
1856 struct be_tx_obj *txo, u16 last_index)
1857 {
1858 struct be_queue_info *txq = &txo->q;
1859 struct be_eth_wrb *wrb;
1860 struct sk_buff **sent_skbs = txo->sent_skb_list;
1861 struct sk_buff *sent_skb;
1862 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1863 bool unmap_skb_hdr = true;
1864
1865 sent_skb = sent_skbs[txq->tail];
1866 BUG_ON(!sent_skb);
1867 sent_skbs[txq->tail] = NULL;
1868
1869 /* skip header wrb */
1870 queue_tail_inc(txq);
1871
1872 do {
1873 cur_index = txq->tail;
1874 wrb = queue_tail_node(txq);
1875 unmap_tx_frag(&adapter->pdev->dev, wrb,
1876 (unmap_skb_hdr && skb_headlen(sent_skb)));
1877 unmap_skb_hdr = false;
1878
1879 num_wrbs++;
1880 queue_tail_inc(txq);
1881 } while (cur_index != last_index);
1882
1883 kfree_skb(sent_skb);
1884 return num_wrbs;
1885 }
1886
1887 /* Return the number of events in the event queue */
1888 static inline int events_get(struct be_eq_obj *eqo)
1889 {
1890 struct be_eq_entry *eqe;
1891 int num = 0;
1892
1893 do {
1894 eqe = queue_tail_node(&eqo->q);
1895 if (eqe->evt == 0)
1896 break;
1897
1898 rmb();
1899 eqe->evt = 0;
1900 num++;
1901 queue_tail_inc(&eqo->q);
1902 } while (true);
1903
1904 return num;
1905 }
1906
1907 /* Leaves the EQ in disarmed state */
1908 static void be_eq_clean(struct be_eq_obj *eqo)
1909 {
1910 int num = events_get(eqo);
1911
1912 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1913 }
1914
1915 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1916 {
1917 struct be_rx_page_info *page_info;
1918 struct be_queue_info *rxq = &rxo->q;
1919 struct be_queue_info *rx_cq = &rxo->cq;
1920 struct be_rx_compl_info *rxcp;
1921 struct be_adapter *adapter = rxo->adapter;
1922 int flush_wait = 0;
1923 u16 tail;
1924
1925 /* Consume pending rx completions.
1926 * Wait for the flush completion (identified by zero num_rcvd)
1927 * to arrive. Notify CQ even when there are no more CQ entries
1928 * for HW to flush partially coalesced CQ entries.
1929 * In Lancer, there is no need to wait for flush compl.
1930 */
1931 for (;;) {
1932 rxcp = be_rx_compl_get(rxo);
1933 if (rxcp == NULL) {
1934 if (lancer_chip(adapter))
1935 break;
1936
1937 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1938 dev_warn(&adapter->pdev->dev,
1939 "did not receive flush compl\n");
1940 break;
1941 }
1942 be_cq_notify(adapter, rx_cq->id, true, 0);
1943 mdelay(1);
1944 } else {
1945 be_rx_compl_discard(rxo, rxcp);
1946 be_cq_notify(adapter, rx_cq->id, false, 1);
1947 if (rxcp->num_rcvd == 0)
1948 break;
1949 }
1950 }
1951
1952 /* After cleanup, leave the CQ in unarmed state */
1953 be_cq_notify(adapter, rx_cq->id, false, 0);
1954
1955 /* Then free posted rx buffers that were not used */
1956 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1957 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1958 page_info = get_rx_page_info(rxo, tail);
1959 put_page(page_info->page);
1960 memset(page_info, 0, sizeof(*page_info));
1961 }
1962 BUG_ON(atomic_read(&rxq->used));
1963 rxq->tail = rxq->head = 0;
1964 }
1965
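/* Reap all outstanding TX completions (waiting up to ~200ms for them to
 * arrive) and then forcibly free any posted TX skbs whose completions
 * will never show up.
 */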
1966 static void be_tx_compl_clean(struct be_adapter *adapter)
1967 {
1968 struct be_tx_obj *txo;
1969 struct be_queue_info *txq;
1970 struct be_eth_tx_compl *txcp;
1971 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1972 struct sk_buff *sent_skb;
1973 bool dummy_wrb;
1974 int i, pending_txqs;
1975
1976 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1977 do {
1978 pending_txqs = adapter->num_tx_qs;
1979
1980 for_all_tx_queues(adapter, txo, i) {
1981 txq = &txo->q;
1982 while ((txcp = be_tx_compl_get(&txo->cq))) {
1983 end_idx =
1984 AMAP_GET_BITS(struct amap_eth_tx_compl,
1985 wrb_index, txcp);
1986 num_wrbs += be_tx_compl_process(adapter, txo,
1987 end_idx);
1988 cmpl++;
1989 }
1990 if (cmpl) {
1991 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1992 atomic_sub(num_wrbs, &txq->used);
1993 cmpl = 0;
1994 num_wrbs = 0;
1995 }
1996 if (atomic_read(&txq->used) == 0)
1997 pending_txqs--;
1998 }
1999
2000 if (pending_txqs == 0 || ++timeo > 200)
2001 break;
2002
2003 mdelay(1);
2004 } while (true);
2005
2006 for_all_tx_queues(adapter, txo, i) {
2007 txq = &txo->q;
2008 if (atomic_read(&txq->used))
2009 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2010 atomic_read(&txq->used));
2011
2012 /* free posted tx for which compls will never arrive */
2013 while (atomic_read(&txq->used)) {
2014 sent_skb = txo->sent_skb_list[txq->tail];
2015 end_idx = txq->tail;
2016 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2017 &dummy_wrb);
2018 index_adv(&end_idx, num_wrbs - 1, txq->len);
2019 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2020 atomic_sub(num_wrbs, &txq->used);
2021 }
2022 }
2023 }
2024
2025 static void be_evt_queues_destroy(struct be_adapter *adapter)
2026 {
2027 struct be_eq_obj *eqo;
2028 int i;
2029
2030 for_all_evt_queues(adapter, eqo, i) {
2031 if (eqo->q.created) {
2032 be_eq_clean(eqo);
2033 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2034 napi_hash_del(&eqo->napi);
2035 netif_napi_del(&eqo->napi);
2036 }
2037 be_queue_free(adapter, &eqo->q);
2038 }
2039 }
2040
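/* Create one event queue per interrupt vector (capped by cfg_num_qs) and
 * register a NAPI context for each EQ.
 */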
2041 static int be_evt_queues_create(struct be_adapter *adapter)
2042 {
2043 struct be_queue_info *eq;
2044 struct be_eq_obj *eqo;
2045 struct be_aic_obj *aic;
2046 int i, rc;
2047
2048 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2049 adapter->cfg_num_qs);
2050
2051 for_all_evt_queues(adapter, eqo, i) {
2052 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2053 BE_NAPI_WEIGHT);
2054 napi_hash_add(&eqo->napi);
2055 aic = &adapter->aic_obj[i];
2056 eqo->adapter = adapter;
2057 eqo->tx_budget = BE_TX_BUDGET;
2058 eqo->idx = i;
2059 aic->max_eqd = BE_MAX_EQD;
2060 aic->enable = true;
2061
2062 eq = &eqo->q;
2063 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2064 sizeof(struct be_eq_entry));
2065 if (rc)
2066 return rc;
2067
2068 rc = be_cmd_eq_create(adapter, eqo);
2069 if (rc)
2070 return rc;
2071 }
2072 return 0;
2073 }
2074
2075 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2076 {
2077 struct be_queue_info *q;
2078
2079 q = &adapter->mcc_obj.q;
2080 if (q->created)
2081 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2082 be_queue_free(adapter, q);
2083
2084 q = &adapter->mcc_obj.cq;
2085 if (q->created)
2086 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2087 be_queue_free(adapter, q);
2088 }
2089
2090 /* Must be called only after TX qs are created as MCC shares TX EQ */
2091 static int be_mcc_queues_create(struct be_adapter *adapter)
2092 {
2093 struct be_queue_info *q, *cq;
2094
2095 cq = &adapter->mcc_obj.cq;
2096 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2097 sizeof(struct be_mcc_compl)))
2098 goto err;
2099
2100 /* Use the default EQ for MCC completions */
2101 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2102 goto mcc_cq_free;
2103
2104 q = &adapter->mcc_obj.q;
2105 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2106 goto mcc_cq_destroy;
2107
2108 if (be_cmd_mccq_create(adapter, q, cq))
2109 goto mcc_q_free;
2110
2111 return 0;
2112
2113 mcc_q_free:
2114 be_queue_free(adapter, q);
2115 mcc_cq_destroy:
2116 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2117 mcc_cq_free:
2118 be_queue_free(adapter, cq);
2119 err:
2120 return -1;
2121 }
2122
2123 static void be_tx_queues_destroy(struct be_adapter *adapter)
2124 {
2125 struct be_queue_info *q;
2126 struct be_tx_obj *txo;
2127 u8 i;
2128
2129 for_all_tx_queues(adapter, txo, i) {
2130 q = &txo->q;
2131 if (q->created)
2132 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2133 be_queue_free(adapter, q);
2134
2135 q = &txo->cq;
2136 if (q->created)
2137 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2138 be_queue_free(adapter, q);
2139 }
2140 }
2141
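/* Create the TX queues and their completion queues. The number of TXQs is
 * capped by the number of EQs; when there are fewer EQs, TXQs share them
 * in round-robin fashion.
 */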
2142 static int be_tx_qs_create(struct be_adapter *adapter)
2143 {
2144 struct be_queue_info *cq, *eq;
2145 struct be_tx_obj *txo;
2146 int status, i;
2147
2148 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2149
2150 for_all_tx_queues(adapter, txo, i) {
2151 cq = &txo->cq;
2152 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2153 sizeof(struct be_eth_tx_compl));
2154 if (status)
2155 return status;
2156
2157 u64_stats_init(&txo->stats.sync);
2158 u64_stats_init(&txo->stats.sync_compl);
2159
2160 /* If num_evt_qs is less than num_tx_qs, then more than
2161 		 * one txq shares an eq
2162 */
2163 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2164 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2165 if (status)
2166 return status;
2167
2168 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2169 sizeof(struct be_eth_wrb));
2170 if (status)
2171 return status;
2172
2173 status = be_cmd_txq_create(adapter, txo);
2174 if (status)
2175 return status;
2176 }
2177
2178 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2179 adapter->num_tx_qs);
2180 return 0;
2181 }
2182
2183 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2184 {
2185 struct be_queue_info *q;
2186 struct be_rx_obj *rxo;
2187 int i;
2188
2189 for_all_rx_queues(adapter, rxo, i) {
2190 q = &rxo->cq;
2191 if (q->created)
2192 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2193 be_queue_free(adapter, q);
2194 }
2195 }
2196
2197 static int be_rx_cqs_create(struct be_adapter *adapter)
2198 {
2199 struct be_queue_info *eq, *cq;
2200 struct be_rx_obj *rxo;
2201 int rc, i;
2202
2203 /* We can create as many RSS rings as there are EQs. */
2204 adapter->num_rx_qs = adapter->num_evt_qs;
2205
2206 	/* We'll use RSS only if at least 2 RSS rings are supported.
2207 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2208 */
2209 if (adapter->num_rx_qs > 1)
2210 adapter->num_rx_qs++;
2211
2212 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2213 for_all_rx_queues(adapter, rxo, i) {
2214 rxo->adapter = adapter;
2215 cq = &rxo->cq;
2216 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2217 sizeof(struct be_eth_rx_compl));
2218 if (rc)
2219 return rc;
2220
2221 u64_stats_init(&rxo->stats.sync);
2222 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2223 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2224 if (rc)
2225 return rc;
2226 }
2227
2228 dev_info(&adapter->pdev->dev,
2229 "created %d RSS queue(s) and 1 default RX queue\n",
2230 adapter->num_rx_qs - 1);
2231 return 0;
2232 }
2233
2234 static irqreturn_t be_intx(int irq, void *dev)
2235 {
2236 struct be_eq_obj *eqo = dev;
2237 struct be_adapter *adapter = eqo->adapter;
2238 int num_evts = 0;
2239
2240 /* IRQ is not expected when NAPI is scheduled as the EQ
2241 * will not be armed.
2242 * But, this can happen on Lancer INTx where it takes
2243 	 * a while to de-assert INTx or in BE2 where occasionally
2244 * an interrupt may be raised even when EQ is unarmed.
2245 * If NAPI is already scheduled, then counting & notifying
2246 * events will orphan them.
2247 */
2248 if (napi_schedule_prep(&eqo->napi)) {
2249 num_evts = events_get(eqo);
2250 __napi_schedule(&eqo->napi);
2251 if (num_evts)
2252 eqo->spurious_intr = 0;
2253 }
2254 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2255
2256 	/* Return IRQ_HANDLED only for the first spurious intr
2257 * after a valid intr to stop the kernel from branding
2258 * this irq as a bad one!
2259 */
2260 if (num_evts || eqo->spurious_intr++ == 0)
2261 return IRQ_HANDLED;
2262 else
2263 return IRQ_NONE;
2264 }
2265
2266 static irqreturn_t be_msix(int irq, void *dev)
2267 {
2268 struct be_eq_obj *eqo = dev;
2269
2270 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2271 napi_schedule(&eqo->napi);
2272 return IRQ_HANDLED;
2273 }
2274
2275 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2276 {
2277 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2278 }
2279
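/* RX completion processing shared by the NAPI and busy-poll paths: consume
 * up to 'budget' completions, drop flush/partial/mis-routed entries, hand
 * packets to GRO or the regular receive path, and replenish RX buffers when
 * the queue runs low.
 */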
2280 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2281 int budget, int polling)
2282 {
2283 struct be_adapter *adapter = rxo->adapter;
2284 struct be_queue_info *rx_cq = &rxo->cq;
2285 struct be_rx_compl_info *rxcp;
2286 u32 work_done;
2287
2288 for (work_done = 0; work_done < budget; work_done++) {
2289 rxcp = be_rx_compl_get(rxo);
2290 if (!rxcp)
2291 break;
2292
2293 /* Is it a flush compl that has no data */
2294 if (unlikely(rxcp->num_rcvd == 0))
2295 goto loop_continue;
2296
2297 		/* Discard compl with partial DMA (Lancer B0) */
2298 if (unlikely(!rxcp->pkt_size)) {
2299 be_rx_compl_discard(rxo, rxcp);
2300 goto loop_continue;
2301 }
2302
2303 /* On BE drop pkts that arrive due to imperfect filtering in
2304 		 * promiscuous mode on some SKUs
2305 */
2306 if (unlikely(rxcp->port != adapter->port_num &&
2307 !lancer_chip(adapter))) {
2308 be_rx_compl_discard(rxo, rxcp);
2309 goto loop_continue;
2310 }
2311
2312 /* Don't do gro when we're busy_polling */
2313 if (do_gro(rxcp) && polling != BUSY_POLLING)
2314 be_rx_compl_process_gro(rxo, napi, rxcp);
2315 else
2316 be_rx_compl_process(rxo, napi, rxcp);
2317
2318 loop_continue:
2319 be_rx_stats_update(rxo, rxcp);
2320 }
2321
2322 if (work_done) {
2323 be_cq_notify(adapter, rx_cq->id, true, work_done);
2324
2325 /* When an rx-obj gets into post_starved state, just
2326 * let be_worker do the posting.
2327 */
2328 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2329 !rxo->rx_post_starved)
2330 be_post_rx_frags(rxo, GFP_ATOMIC);
2331 }
2332
2333 return work_done;
2334 }
2335
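/* Reap up to 'budget' TX completions on the given TXQ, free the associated
 * wrbs and wake the netdev sub-queue if it was stopped for lack of wrbs.
 * Returns true when less than 'budget' work was found, i.e. TX is done.
 */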
2336 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2337 int budget, int idx)
2338 {
2339 struct be_eth_tx_compl *txcp;
2340 int num_wrbs = 0, work_done;
2341
2342 for (work_done = 0; work_done < budget; work_done++) {
2343 txcp = be_tx_compl_get(&txo->cq);
2344 if (!txcp)
2345 break;
2346 num_wrbs += be_tx_compl_process(adapter, txo,
2347 AMAP_GET_BITS(struct amap_eth_tx_compl,
2348 wrb_index, txcp));
2349 }
2350
2351 if (work_done) {
2352 be_cq_notify(adapter, txo->cq.id, true, work_done);
2353 atomic_sub(num_wrbs, &txo->q.used);
2354
2355 /* As Tx wrbs have been freed up, wake up netdev queue
2356 * if it was stopped due to lack of tx wrbs. */
2357 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2358 atomic_read(&txo->q.used) < txo->q.len / 2) {
2359 netif_wake_subqueue(adapter->netdev, idx);
2360 }
2361
2362 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2363 tx_stats(txo)->tx_compl += work_done;
2364 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2365 }
2366 return (work_done < budget); /* Done */
2367 }
2368
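/* NAPI poll handler: services all TXQs and RXQs mapped to this EQ, handles
 * MCC completions on the MCC EQ, and re-arms the EQ only when all the work
 * fit within the budget.
 */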
2369 int be_poll(struct napi_struct *napi, int budget)
2370 {
2371 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2372 struct be_adapter *adapter = eqo->adapter;
2373 int max_work = 0, work, i, num_evts;
2374 struct be_rx_obj *rxo;
2375 bool tx_done;
2376
2377 num_evts = events_get(eqo);
2378
2379 /* Process all TXQs serviced by this EQ */
2380 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2381 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2382 eqo->tx_budget, i);
2383 if (!tx_done)
2384 max_work = budget;
2385 }
2386
2387 if (be_lock_napi(eqo)) {
2388 /* This loop will iterate twice for EQ0 in which
2389 		 * completions of the last RXQ (default one) are also processed.
2390 * For other EQs the loop iterates only once
2391 */
2392 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2393 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2394 max_work = max(work, max_work);
2395 }
2396 be_unlock_napi(eqo);
2397 } else {
2398 max_work = budget;
2399 }
2400
2401 if (is_mcc_eqo(eqo))
2402 be_process_mcc(adapter);
2403
2404 if (max_work < budget) {
2405 napi_complete(napi);
2406 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2407 } else {
2408 /* As we'll continue in polling mode, count and clear events */
2409 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2410 }
2411 return max_work;
2412 }
2413
2414 #ifdef CONFIG_NET_RX_BUSY_POLL
2415 static int be_busy_poll(struct napi_struct *napi)
2416 {
2417 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2418 struct be_adapter *adapter = eqo->adapter;
2419 struct be_rx_obj *rxo;
2420 int i, work = 0;
2421
2422 if (!be_lock_busy_poll(eqo))
2423 return LL_FLUSH_BUSY;
2424
2425 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2426 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2427 if (work)
2428 break;
2429 }
2430
2431 be_unlock_busy_poll(eqo);
2432 return work;
2433 }
2434 #endif
2435
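/* Check for unrecoverable errors: SLIPORT status registers on Lancer and
 * (masked) UE status registers on BE chips. SLIPORT errors set hw_error;
 * UE bits are only logged as they may be spurious on some platforms.
 */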
2436 void be_detect_error(struct be_adapter *adapter)
2437 {
2438 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2439 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2440 u32 i;
2441
2442 if (be_hw_error(adapter))
2443 return;
2444
2445 if (lancer_chip(adapter)) {
2446 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2447 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2448 sliport_err1 = ioread32(adapter->db +
2449 SLIPORT_ERROR1_OFFSET);
2450 sliport_err2 = ioread32(adapter->db +
2451 SLIPORT_ERROR2_OFFSET);
2452 }
2453 } else {
2454 pci_read_config_dword(adapter->pdev,
2455 PCICFG_UE_STATUS_LOW, &ue_lo);
2456 pci_read_config_dword(adapter->pdev,
2457 PCICFG_UE_STATUS_HIGH, &ue_hi);
2458 pci_read_config_dword(adapter->pdev,
2459 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2460 pci_read_config_dword(adapter->pdev,
2461 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2462
2463 ue_lo = (ue_lo & ~ue_lo_mask);
2464 ue_hi = (ue_hi & ~ue_hi_mask);
2465 }
2466
2467 /* On certain platforms BE hardware can indicate spurious UEs.
2468 	 * In case of a real UE, the h/w will anyway stop working completely.
2469 	 * Hence hw_error is not set on UE detection.
2470 */
2471 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2472 adapter->hw_error = true;
2473 		/* Do not log error messages if it's a FW reset */
2474 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2475 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2476 dev_info(&adapter->pdev->dev,
2477 "Firmware update in progress\n");
2478 return;
2479 } else {
2480 dev_err(&adapter->pdev->dev,
2481 "Error detected in the card\n");
2482 }
2483 }
2484
2485 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2486 dev_err(&adapter->pdev->dev,
2487 "ERR: sliport status 0x%x\n", sliport_status);
2488 dev_err(&adapter->pdev->dev,
2489 "ERR: sliport error1 0x%x\n", sliport_err1);
2490 dev_err(&adapter->pdev->dev,
2491 "ERR: sliport error2 0x%x\n", sliport_err2);
2492 }
2493
2494 if (ue_lo) {
2495 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2496 if (ue_lo & 1)
2497 dev_err(&adapter->pdev->dev,
2498 "UE: %s bit set\n", ue_status_low_desc[i]);
2499 }
2500 }
2501
2502 if (ue_hi) {
2503 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2504 if (ue_hi & 1)
2505 dev_err(&adapter->pdev->dev,
2506 "UE: %s bit set\n", ue_status_hi_desc[i]);
2507 }
2508 }
2509
2510 }
2511
2512 static void be_msix_disable(struct be_adapter *adapter)
2513 {
2514 if (msix_enabled(adapter)) {
2515 pci_disable_msix(adapter->pdev);
2516 adapter->num_msix_vec = 0;
2517 adapter->num_msix_roce_vec = 0;
2518 }
2519 }
2520
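/* Enable MSI-x: request vectors for NIC (and RoCE, when supported) use.
 * If the full request fails, retry with the vector count reported by the
 * PCI core. PFs may fall back to INTx; VFs must have MSI-x to probe.
 */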
2521 static int be_msix_enable(struct be_adapter *adapter)
2522 {
2523 int i, status, num_vec;
2524 struct device *dev = &adapter->pdev->dev;
2525
2526 /* If RoCE is supported, program the max number of NIC vectors that
2527 * may be configured via set-channels, along with vectors needed for
2528 	 * RoCE. Else, just program the number we'll use initially.
2529 */
2530 if (be_roce_supported(adapter))
2531 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2532 2 * num_online_cpus());
2533 else
2534 num_vec = adapter->cfg_num_qs;
2535
2536 for (i = 0; i < num_vec; i++)
2537 adapter->msix_entries[i].entry = i;
2538
2539 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2540 if (status == 0) {
2541 goto done;
2542 } else if (status >= MIN_MSIX_VECTORS) {
2543 num_vec = status;
2544 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2545 num_vec);
2546 if (!status)
2547 goto done;
2548 }
2549
2550 dev_warn(dev, "MSIx enable failed\n");
2551
2552 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2553 if (!be_physfn(adapter))
2554 return status;
2555 return 0;
2556 done:
2557 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2558 adapter->num_msix_roce_vec = num_vec / 2;
2559 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2560 adapter->num_msix_roce_vec);
2561 }
2562
2563 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2564
2565 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2566 adapter->num_msix_vec);
2567 return 0;
2568 }
2569
2570 static inline int be_msix_vec_get(struct be_adapter *adapter,
2571 struct be_eq_obj *eqo)
2572 {
2573 return adapter->msix_entries[eqo->msix_idx].vector;
2574 }
2575
2576 static int be_msix_register(struct be_adapter *adapter)
2577 {
2578 struct net_device *netdev = adapter->netdev;
2579 struct be_eq_obj *eqo;
2580 int status, i, vec;
2581
2582 for_all_evt_queues(adapter, eqo, i) {
2583 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2584 vec = be_msix_vec_get(adapter, eqo);
2585 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2586 if (status)
2587 goto err_msix;
2588 }
2589
2590 return 0;
2591 err_msix:
2592 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2593 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2594 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2595 status);
2596 be_msix_disable(adapter);
2597 return status;
2598 }
2599
2600 static int be_irq_register(struct be_adapter *adapter)
2601 {
2602 struct net_device *netdev = adapter->netdev;
2603 int status;
2604
2605 if (msix_enabled(adapter)) {
2606 status = be_msix_register(adapter);
2607 if (status == 0)
2608 goto done;
2609 /* INTx is not supported for VF */
2610 if (!be_physfn(adapter))
2611 return status;
2612 }
2613
2614 /* INTx: only the first EQ is used */
2615 netdev->irq = adapter->pdev->irq;
2616 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2617 &adapter->eq_obj[0]);
2618 if (status) {
2619 dev_err(&adapter->pdev->dev,
2620 "INTx request IRQ failed - err %d\n", status);
2621 return status;
2622 }
2623 done:
2624 adapter->isr_registered = true;
2625 return 0;
2626 }
2627
2628 static void be_irq_unregister(struct be_adapter *adapter)
2629 {
2630 struct net_device *netdev = adapter->netdev;
2631 struct be_eq_obj *eqo;
2632 int i;
2633
2634 if (!adapter->isr_registered)
2635 return;
2636
2637 /* INTx */
2638 if (!msix_enabled(adapter)) {
2639 free_irq(netdev->irq, &adapter->eq_obj[0]);
2640 goto done;
2641 }
2642
2643 /* MSIx */
2644 for_all_evt_queues(adapter, eqo, i)
2645 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2646
2647 done:
2648 adapter->isr_registered = false;
2649 }
2650
2651 static void be_rx_qs_destroy(struct be_adapter *adapter)
2652 {
2653 struct be_queue_info *q;
2654 struct be_rx_obj *rxo;
2655 int i;
2656
2657 for_all_rx_queues(adapter, rxo, i) {
2658 q = &rxo->q;
2659 if (q->created) {
2660 be_cmd_rxq_destroy(adapter, q);
2661 be_rx_cq_clean(rxo);
2662 }
2663 be_queue_free(adapter, q);
2664 }
2665 }
2666
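/* ndo_stop handler: disable NAPI/busy-poll, quiesce the MCC, drain TX
 * completions, destroy the RX queues, delete the additional unicast MACs
 * and clean the EQs before unregistering the IRQs.
 */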
2667 static int be_close(struct net_device *netdev)
2668 {
2669 struct be_adapter *adapter = netdev_priv(netdev);
2670 struct be_eq_obj *eqo;
2671 int i;
2672
2673 be_roce_dev_close(adapter);
2674
2675 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2676 for_all_evt_queues(adapter, eqo, i) {
2677 napi_disable(&eqo->napi);
2678 be_disable_busy_poll(eqo);
2679 }
2680 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2681 }
2682
2683 be_async_mcc_disable(adapter);
2684
2685 /* Wait for all pending tx completions to arrive so that
2686 * all tx skbs are freed.
2687 */
2688 netif_tx_disable(netdev);
2689 be_tx_compl_clean(adapter);
2690
2691 be_rx_qs_destroy(adapter);
2692
2693 for (i = 1; i < (adapter->uc_macs + 1); i++)
2694 be_cmd_pmac_del(adapter, adapter->if_handle,
2695 adapter->pmac_id[i], 0);
2696 adapter->uc_macs = 0;
2697
2698 for_all_evt_queues(adapter, eqo, i) {
2699 if (msix_enabled(adapter))
2700 synchronize_irq(be_msix_vec_get(adapter, eqo));
2701 else
2702 synchronize_irq(netdev->irq);
2703 be_eq_clean(eqo);
2704 }
2705
2706 be_irq_unregister(adapter);
2707
2708 return 0;
2709 }
2710
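/* Create the RX queues: the default RXQ first (as FW prefers), then the
 * RSS rings; program the RSS indirection table and hash flags and post the
 * initial receive buffers.
 */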
2711 static int be_rx_qs_create(struct be_adapter *adapter)
2712 {
2713 struct be_rx_obj *rxo;
2714 int rc, i, j;
2715 u8 rsstable[128];
2716
2717 for_all_rx_queues(adapter, rxo, i) {
2718 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2719 sizeof(struct be_eth_rx_d));
2720 if (rc)
2721 return rc;
2722 }
2723
2724 /* The FW would like the default RXQ to be created first */
2725 rxo = default_rxo(adapter);
2726 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2727 adapter->if_handle, false, &rxo->rss_id);
2728 if (rc)
2729 return rc;
2730
2731 for_all_rss_queues(adapter, rxo, i) {
2732 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2733 rx_frag_size, adapter->if_handle,
2734 true, &rxo->rss_id);
2735 if (rc)
2736 return rc;
2737 }
2738
2739 if (be_multi_rxq(adapter)) {
2740 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2741 for_all_rss_queues(adapter, rxo, i) {
2742 if ((j + i) >= 128)
2743 break;
2744 rsstable[j + i] = rxo->rss_id;
2745 }
2746 }
2747 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2748 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2749
2750 if (!BEx_chip(adapter))
2751 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2752 RSS_ENABLE_UDP_IPV6;
2753 } else {
2754 /* Disable RSS, if only default RX Q is created */
2755 adapter->rss_flags = RSS_ENABLE_NONE;
2756 }
2757
2758 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2759 128);
2760 if (rc) {
2761 adapter->rss_flags = RSS_ENABLE_NONE;
2762 return rc;
2763 }
2764
2765 /* First time posting */
2766 for_all_rx_queues(adapter, rxo, i)
2767 be_post_rx_frags(rxo, GFP_KERNEL);
2768 return 0;
2769 }
2770
2771 static int be_open(struct net_device *netdev)
2772 {
2773 struct be_adapter *adapter = netdev_priv(netdev);
2774 struct be_eq_obj *eqo;
2775 struct be_rx_obj *rxo;
2776 struct be_tx_obj *txo;
2777 u8 link_status;
2778 int status, i;
2779
2780 status = be_rx_qs_create(adapter);
2781 if (status)
2782 goto err;
2783
2784 status = be_irq_register(adapter);
2785 if (status)
2786 goto err;
2787
2788 for_all_rx_queues(adapter, rxo, i)
2789 be_cq_notify(adapter, rxo->cq.id, true, 0);
2790
2791 for_all_tx_queues(adapter, txo, i)
2792 be_cq_notify(adapter, txo->cq.id, true, 0);
2793
2794 be_async_mcc_enable(adapter);
2795
2796 for_all_evt_queues(adapter, eqo, i) {
2797 napi_enable(&eqo->napi);
2798 be_enable_busy_poll(eqo);
2799 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2800 }
2801 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2802
2803 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2804 if (!status)
2805 be_link_status_update(adapter, link_status);
2806
2807 netif_tx_start_all_queues(netdev);
2808 be_roce_dev_open(adapter);
2809 return 0;
2810 err:
2811 be_close(adapter->netdev);
2812 return -EIO;
2813 }
2814
2815 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2816 {
2817 struct be_dma_mem cmd;
2818 int status = 0;
2819 u8 mac[ETH_ALEN];
2820
2821 memset(mac, 0, ETH_ALEN);
2822
2823 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2824 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2825 GFP_KERNEL);
2826 if (cmd.va == NULL)
2827 return -1;
2828
2829 if (enable) {
2830 status = pci_write_config_dword(adapter->pdev,
2831 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2832 if (status) {
2833 dev_err(&adapter->pdev->dev,
2834 "Could not enable Wake-on-lan\n");
2835 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2836 cmd.dma);
2837 return status;
2838 }
2839 status = be_cmd_enable_magic_wol(adapter,
2840 adapter->netdev->dev_addr, &cmd);
2841 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2842 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2843 } else {
2844 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2845 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2846 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2847 }
2848
2849 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2850 return status;
2851 }
2852
2853 /*
2854 * Generate a seed MAC address from the PF MAC Address using jhash.
2855 * MAC addresses for VFs are assigned incrementally starting from the seed.
2856 * These addresses are programmed in the ASIC by the PF and the VF driver
2857 * queries for the MAC address during its probe.
2858 */
2859 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2860 {
2861 u32 vf;
2862 int status = 0;
2863 u8 mac[ETH_ALEN];
2864 struct be_vf_cfg *vf_cfg;
2865
2866 be_vf_eth_addr_generate(adapter, mac);
2867
2868 for_all_vfs(adapter, vf_cfg, vf) {
2869 if (BEx_chip(adapter))
2870 status = be_cmd_pmac_add(adapter, mac,
2871 vf_cfg->if_handle,
2872 &vf_cfg->pmac_id, vf + 1);
2873 else
2874 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2875 vf + 1);
2876
2877 if (status)
2878 dev_err(&adapter->pdev->dev,
2879 "Mac address assignment failed for VF %d\n", vf);
2880 else
2881 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2882
2883 mac[5] += 1;
2884 }
2885 return status;
2886 }
2887
2888 static int be_vfs_mac_query(struct be_adapter *adapter)
2889 {
2890 int status, vf;
2891 u8 mac[ETH_ALEN];
2892 struct be_vf_cfg *vf_cfg;
2893 bool active = false;
2894
2895 for_all_vfs(adapter, vf_cfg, vf) {
2896 be_cmd_get_mac_from_list(adapter, mac, &active,
2897 &vf_cfg->pmac_id, 0);
2898
2899 status = be_cmd_mac_addr_query(adapter, mac, false,
2900 vf_cfg->if_handle, 0);
2901 if (status)
2902 return status;
2903 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2904 }
2905 return 0;
2906 }
2907
2908 static void be_vf_clear(struct be_adapter *adapter)
2909 {
2910 struct be_vf_cfg *vf_cfg;
2911 u32 vf;
2912
2913 if (pci_vfs_assigned(adapter->pdev)) {
2914 dev_warn(&adapter->pdev->dev,
2915 "VFs are assigned to VMs: not disabling VFs\n");
2916 goto done;
2917 }
2918
2919 pci_disable_sriov(adapter->pdev);
2920
2921 for_all_vfs(adapter, vf_cfg, vf) {
2922 if (BEx_chip(adapter))
2923 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2924 vf_cfg->pmac_id, vf + 1);
2925 else
2926 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2927 vf + 1);
2928
2929 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2930 }
2931 done:
2932 kfree(adapter->vf_cfg);
2933 adapter->num_vfs = 0;
2934 }
2935
2936 static void be_clear_queues(struct be_adapter *adapter)
2937 {
2938 be_mcc_queues_destroy(adapter);
2939 be_rx_cqs_destroy(adapter);
2940 be_tx_queues_destroy(adapter);
2941 be_evt_queues_destroy(adapter);
2942 }
2943
2944 static void be_cancel_worker(struct be_adapter *adapter)
2945 {
2946 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2947 cancel_delayed_work_sync(&adapter->work);
2948 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2949 }
2950 }
2951
2952 static void be_mac_clear(struct be_adapter *adapter)
2953 {
2954 int i;
2955
2956 if (adapter->pmac_id) {
2957 for (i = 0; i < (adapter->uc_macs + 1); i++)
2958 be_cmd_pmac_del(adapter, adapter->if_handle,
2959 adapter->pmac_id[i], 0);
2960 adapter->uc_macs = 0;
2961
2962 kfree(adapter->pmac_id);
2963 adapter->pmac_id = NULL;
2964 }
2965 }
2966
2967 static int be_clear(struct be_adapter *adapter)
2968 {
2969 be_cancel_worker(adapter);
2970
2971 if (sriov_enabled(adapter))
2972 be_vf_clear(adapter);
2973
2974 /* delete the primary mac along with the uc-mac list */
2975 be_mac_clear(adapter);
2976
2977 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2978
2979 be_clear_queues(adapter);
2980
2981 be_msix_disable(adapter);
2982 return 0;
2983 }
2984
2985 static int be_vfs_if_create(struct be_adapter *adapter)
2986 {
2987 struct be_resources res = {0};
2988 struct be_vf_cfg *vf_cfg;
2989 u32 cap_flags, en_flags, vf;
2990 int status = 0;
2991
2992 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2993 BE_IF_FLAGS_MULTICAST;
2994
2995 for_all_vfs(adapter, vf_cfg, vf) {
2996 if (!BE3_chip(adapter)) {
2997 status = be_cmd_get_profile_config(adapter, &res,
2998 vf + 1);
2999 if (!status)
3000 cap_flags = res.if_cap_flags;
3001 }
3002
3003 /* If a FW profile exists, then cap_flags are updated */
3004 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3005 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3006 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3007 &vf_cfg->if_handle, vf + 1);
3008 if (status)
3009 goto err;
3010 }
3011 err:
3012 return status;
3013 }
3014
3015 static int be_vf_setup_init(struct be_adapter *adapter)
3016 {
3017 struct be_vf_cfg *vf_cfg;
3018 int vf;
3019
3020 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3021 GFP_KERNEL);
3022 if (!adapter->vf_cfg)
3023 return -ENOMEM;
3024
3025 for_all_vfs(adapter, vf_cfg, vf) {
3026 vf_cfg->if_handle = -1;
3027 vf_cfg->pmac_id = -1;
3028 }
3029 return 0;
3030 }
3031
3032 static int be_vf_setup(struct be_adapter *adapter)
3033 {
3034 struct be_vf_cfg *vf_cfg;
3035 u16 def_vlan, lnk_speed;
3036 int status, old_vfs, vf;
3037 struct device *dev = &adapter->pdev->dev;
3038 u32 privileges;
3039
3040 old_vfs = pci_num_vf(adapter->pdev);
3041 if (old_vfs) {
3042 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3043 if (old_vfs != num_vfs)
3044 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3045 adapter->num_vfs = old_vfs;
3046 } else {
3047 if (num_vfs > be_max_vfs(adapter))
3048 dev_info(dev, "Device supports %d VFs and not %d\n",
3049 be_max_vfs(adapter), num_vfs);
3050 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3051 if (!adapter->num_vfs)
3052 return 0;
3053 }
3054
3055 status = be_vf_setup_init(adapter);
3056 if (status)
3057 goto err;
3058
3059 if (old_vfs) {
3060 for_all_vfs(adapter, vf_cfg, vf) {
3061 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3062 if (status)
3063 goto err;
3064 }
3065 } else {
3066 status = be_vfs_if_create(adapter);
3067 if (status)
3068 goto err;
3069 }
3070
3071 if (old_vfs) {
3072 status = be_vfs_mac_query(adapter);
3073 if (status)
3074 goto err;
3075 } else {
3076 status = be_vf_eth_addr_config(adapter);
3077 if (status)
3078 goto err;
3079 }
3080
3081 for_all_vfs(adapter, vf_cfg, vf) {
3082 		/* Allow VFs to program MAC/VLAN filters */
3083 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3084 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3085 status = be_cmd_set_fn_privileges(adapter,
3086 privileges |
3087 BE_PRIV_FILTMGMT,
3088 vf + 1);
3089 if (!status)
3090 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3091 vf);
3092 }
3093
3094 		/* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3095 * Allow full available bandwidth
3096 */
3097 if (BE3_chip(adapter) && !old_vfs)
3098 be_cmd_set_qos(adapter, 1000, vf+1);
3099
3100 status = be_cmd_link_status_query(adapter, &lnk_speed,
3101 NULL, vf + 1);
3102 if (!status)
3103 vf_cfg->tx_rate = lnk_speed;
3104
3105 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3106 vf + 1, vf_cfg->if_handle, NULL);
3107 if (status)
3108 goto err;
3109 vf_cfg->def_vid = def_vlan;
3110
3111 if (!old_vfs)
3112 be_cmd_enable_vf(adapter, vf + 1);
3113 }
3114
3115 if (!old_vfs) {
3116 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3117 if (status) {
3118 dev_err(dev, "SRIOV enable failed\n");
3119 adapter->num_vfs = 0;
3120 goto err;
3121 }
3122 }
3123 return 0;
3124 err:
3125 dev_err(dev, "VF setup failed\n");
3126 be_vf_clear(adapter);
3127 return status;
3128 }
3129
3130 /* On BE2/BE3 the FW does not report the supported resource limits */
3131 static void BEx_get_resources(struct be_adapter *adapter,
3132 struct be_resources *res)
3133 {
3134 struct pci_dev *pdev = adapter->pdev;
3135 bool use_sriov = false;
3136 int max_vfs;
3137
3138 max_vfs = pci_sriov_get_totalvfs(pdev);
3139
3140 if (BE3_chip(adapter) && sriov_want(adapter)) {
3141 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3142 use_sriov = res->max_vfs;
3143 }
3144
3145 if (be_physfn(adapter))
3146 res->max_uc_mac = BE_UC_PMAC_COUNT;
3147 else
3148 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3149
3150 if (adapter->function_mode & FLEX10_MODE)
3151 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3152 else if (adapter->function_mode & UMC_ENABLED)
3153 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3154 else
3155 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3156 res->max_mcast_mac = BE_MAX_MC;
3157
3158 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3159 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3160 !be_physfn(adapter) || (adapter->port_num > 1))
3161 res->max_tx_qs = 1;
3162 else
3163 res->max_tx_qs = BE3_MAX_TX_QS;
3164
3165 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3166 !use_sriov && be_physfn(adapter))
3167 res->max_rss_qs = (adapter->be3_native) ?
3168 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3169 res->max_rx_qs = res->max_rss_qs + 1;
3170
3171 if (be_physfn(adapter))
3172 res->max_evt_qs = (max_vfs > 0) ?
3173 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3174 else
3175 res->max_evt_qs = 1;
3176
3177 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3178 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3179 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3180 }
3181
3182 static void be_setup_init(struct be_adapter *adapter)
3183 {
3184 adapter->vlan_prio_bmap = 0xff;
3185 adapter->phy.link_speed = -1;
3186 adapter->if_handle = -1;
3187 adapter->be3_native = false;
3188 adapter->promiscuous = false;
3189 if (be_physfn(adapter))
3190 adapter->cmd_privileges = MAX_PRIVILEGES;
3191 else
3192 adapter->cmd_privileges = MIN_PRIVILEGES;
3193 }
3194
3195 static int be_get_resources(struct be_adapter *adapter)
3196 {
3197 struct device *dev = &adapter->pdev->dev;
3198 struct be_resources res = {0};
3199 int status;
3200
3201 if (BEx_chip(adapter)) {
3202 BEx_get_resources(adapter, &res);
3203 adapter->res = res;
3204 }
3205
3206 	/* For Lancer, SH etc., read per-function resource limits from FW.
3207 	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3208 	 * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
3209 */
3210 if (!BEx_chip(adapter)) {
3211 status = be_cmd_get_func_config(adapter, &res);
3212 if (status)
3213 return status;
3214
3215 /* If RoCE may be enabled stash away half the EQs for RoCE */
3216 if (be_roce_supported(adapter))
3217 res.max_evt_qs /= 2;
3218 adapter->res = res;
3219
3220 if (be_physfn(adapter)) {
3221 status = be_cmd_get_profile_config(adapter, &res, 0);
3222 if (status)
3223 return status;
3224 adapter->res.max_vfs = res.max_vfs;
3225 }
3226
3227 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3228 be_max_txqs(adapter), be_max_rxqs(adapter),
3229 be_max_rss(adapter), be_max_eqs(adapter),
3230 be_max_vfs(adapter));
3231 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3232 be_max_uc(adapter), be_max_mc(adapter),
3233 be_max_vlans(adapter));
3234 }
3235
3236 return 0;
3237 }
3238
3239 /* Routine to query per function resource limits */
3240 static int be_get_config(struct be_adapter *adapter)
3241 {
3242 u16 profile_id;
3243 int status;
3244
3245 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3246 &adapter->function_mode,
3247 &adapter->function_caps,
3248 &adapter->asic_rev);
3249 if (status)
3250 return status;
3251
3252 if (be_physfn(adapter)) {
3253 status = be_cmd_get_active_profile(adapter, &profile_id);
3254 if (!status)
3255 dev_info(&adapter->pdev->dev,
3256 "Using profile 0x%x\n", profile_id);
3257 }
3258
3259 status = be_get_resources(adapter);
3260 if (status)
3261 return status;
3262
3263 /* primary mac needs 1 pmac entry */
3264 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3265 GFP_KERNEL);
3266 if (!adapter->pmac_id)
3267 return -ENOMEM;
3268
3269 /* Sanitize cfg_num_qs based on HW and platform limits */
3270 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3271
3272 return 0;
3273 }
3274
3275 static int be_mac_setup(struct be_adapter *adapter)
3276 {
3277 u8 mac[ETH_ALEN];
3278 int status;
3279
3280 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3281 status = be_cmd_get_perm_mac(adapter, mac);
3282 if (status)
3283 return status;
3284
3285 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3286 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3287 } else {
3288 /* Maybe the HW was reset; dev_addr must be re-programmed */
3289 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3290 }
3291
3292 /* For BE3-R VFs, the PF programs the initial MAC address */
3293 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3294 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3295 &adapter->pmac_id[0], 0);
3296 return 0;
3297 }
3298
3299 static void be_schedule_worker(struct be_adapter *adapter)
3300 {
3301 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3302 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3303 }
3304
3305 static int be_setup_queues(struct be_adapter *adapter)
3306 {
3307 struct net_device *netdev = adapter->netdev;
3308 int status;
3309
3310 status = be_evt_queues_create(adapter);
3311 if (status)
3312 goto err;
3313
3314 status = be_tx_qs_create(adapter);
3315 if (status)
3316 goto err;
3317
3318 status = be_rx_cqs_create(adapter);
3319 if (status)
3320 goto err;
3321
3322 status = be_mcc_queues_create(adapter);
3323 if (status)
3324 goto err;
3325
3326 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3327 if (status)
3328 goto err;
3329
3330 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3331 if (status)
3332 goto err;
3333
3334 return 0;
3335 err:
3336 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3337 return status;
3338 }
3339
3340 int be_update_queues(struct be_adapter *adapter)
3341 {
3342 struct net_device *netdev = adapter->netdev;
3343 int status;
3344
3345 if (netif_running(netdev))
3346 be_close(netdev);
3347
3348 be_cancel_worker(adapter);
3349
3350 /* If any vectors have been shared with RoCE we cannot re-program
3351 * the MSIx table.
3352 */
3353 if (!adapter->num_msix_roce_vec)
3354 be_msix_disable(adapter);
3355
3356 be_clear_queues(adapter);
3357
3358 if (!msix_enabled(adapter)) {
3359 status = be_msix_enable(adapter);
3360 if (status)
3361 return status;
3362 }
3363
3364 status = be_setup_queues(adapter);
3365 if (status)
3366 return status;
3367
3368 be_schedule_worker(adapter);
3369
3370 if (netif_running(netdev))
3371 status = be_open(netdev);
3372
3373 return status;
3374 }
3375
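/* Main initialization path: query FW config and resource limits, enable
 * MSI-x, create the interface and all queues, program MAC/VLAN/RX-mode and
 * flow-control settings, optionally set up SR-IOV, and start the worker.
 */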
3376 static int be_setup(struct be_adapter *adapter)
3377 {
3378 struct device *dev = &adapter->pdev->dev;
3379 u32 tx_fc, rx_fc, en_flags;
3380 int status;
3381
3382 be_setup_init(adapter);
3383
3384 if (!lancer_chip(adapter))
3385 be_cmd_req_native_mode(adapter);
3386
3387 status = be_get_config(adapter);
3388 if (status)
3389 goto err;
3390
3391 status = be_msix_enable(adapter);
3392 if (status)
3393 goto err;
3394
3395 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3396 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3397 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3398 en_flags |= BE_IF_FLAGS_RSS;
3399 en_flags = en_flags & be_if_cap_flags(adapter);
3400 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3401 &adapter->if_handle, 0);
3402 if (status)
3403 goto err;
3404
3405 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3406 rtnl_lock();
3407 status = be_setup_queues(adapter);
3408 rtnl_unlock();
3409 if (status)
3410 goto err;
3411
3412 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3413 	/* In UMC mode FW does not return the right privileges.
3414 * Override with correct privilege equivalent to PF.
3415 */
3416 if (be_is_mc(adapter))
3417 adapter->cmd_privileges = MAX_PRIVILEGES;
3418
3419 status = be_mac_setup(adapter);
3420 if (status)
3421 goto err;
3422
3423 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3424
3425 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3426 		dev_err(dev, "Firmware on card is old (%s), IRQs may not work.",
3427 adapter->fw_ver);
3428 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3429 }
3430
3431 if (adapter->vlans_added)
3432 be_vid_config(adapter);
3433
3434 be_set_rx_mode(adapter->netdev);
3435
3436 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3437
3438 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3439 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3440 adapter->rx_fc);
3441
3442 if (sriov_want(adapter)) {
3443 if (be_max_vfs(adapter))
3444 be_vf_setup(adapter);
3445 else
3446 dev_warn(dev, "device doesn't support SRIOV\n");
3447 }
3448
3449 status = be_cmd_get_phy_info(adapter);
3450 if (!status && be_pause_supported(adapter))
3451 adapter->phy.fc_autoneg = 1;
3452
3453 be_schedule_worker(adapter);
3454 return 0;
3455 err:
3456 be_clear(adapter);
3457 return status;
3458 }
3459
3460 #ifdef CONFIG_NET_POLL_CONTROLLER
3461 static void be_netpoll(struct net_device *netdev)
3462 {
3463 struct be_adapter *adapter = netdev_priv(netdev);
3464 struct be_eq_obj *eqo;
3465 int i;
3466
3467 for_all_evt_queues(adapter, eqo, i) {
3468 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3469 napi_schedule(&eqo->napi);
3470 }
3471
3472 return;
3473 }
3474 #endif
3475
3476 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3477 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3478
3479 static bool be_flash_redboot(struct be_adapter *adapter,
3480 const u8 *p, u32 img_start, int image_size,
3481 int hdr_size)
3482 {
3483 u32 crc_offset;
3484 u8 flashed_crc[4];
3485 int status;
3486
3487 crc_offset = hdr_size + img_start + image_size - 4;
3488
3489 p += crc_offset;
3490
3491 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3492 (image_size - 4));
3493 if (status) {
3494 dev_err(&adapter->pdev->dev,
3495 "could not get crc from flash, not flashing redboot\n");
3496 return false;
3497 }
3498
3499 	/* update redboot only if crc does not match */
3500 if (!memcmp(flashed_crc, p, 4))
3501 return false;
3502 else
3503 return true;
3504 }
3505
3506 static bool phy_flashing_required(struct be_adapter *adapter)
3507 {
3508 return (adapter->phy.phy_type == TN_8022 &&
3509 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3510 }
3511
3512 static bool is_comp_in_ufi(struct be_adapter *adapter,
3513 struct flash_section_info *fsec, int type)
3514 {
3515 int i = 0, img_type = 0;
3516 struct flash_section_info_g2 *fsec_g2 = NULL;
3517
3518 if (BE2_chip(adapter))
3519 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3520
3521 for (i = 0; i < MAX_FLASH_COMP; i++) {
3522 if (fsec_g2)
3523 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3524 else
3525 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3526
3527 if (img_type == type)
3528 return true;
3529 }
3530 return false;
3531
3532 }
3533
3534 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3535 int header_size,
3536 const struct firmware *fw)
3537 {
3538 struct flash_section_info *fsec = NULL;
3539 const u8 *p = fw->data;
3540
3541 p += header_size;
3542 while (p < (fw->data + fw->size)) {
3543 fsec = (struct flash_section_info *)p;
3544 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3545 return fsec;
3546 p += 32;
3547 }
3548 return NULL;
3549 }
3550
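/* Write one flash component: the image is pushed to the FW in 32KB chunks
 * using SAVE operations, with a FLASH operation on the final chunk to
 * commit it to the region selected by 'optype'.
 */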
3551 static int be_flash(struct be_adapter *adapter, const u8 *img,
3552 struct be_dma_mem *flash_cmd, int optype, int img_size)
3553 {
3554 u32 total_bytes = 0, flash_op, num_bytes = 0;
3555 int status = 0;
3556 struct be_cmd_write_flashrom *req = flash_cmd->va;
3557
3558 total_bytes = img_size;
3559 while (total_bytes) {
3560 num_bytes = min_t(u32, 32*1024, total_bytes);
3561
3562 total_bytes -= num_bytes;
3563
3564 if (!total_bytes) {
3565 if (optype == OPTYPE_PHY_FW)
3566 flash_op = FLASHROM_OPER_PHY_FLASH;
3567 else
3568 flash_op = FLASHROM_OPER_FLASH;
3569 } else {
3570 if (optype == OPTYPE_PHY_FW)
3571 flash_op = FLASHROM_OPER_PHY_SAVE;
3572 else
3573 flash_op = FLASHROM_OPER_SAVE;
3574 }
3575
3576 memcpy(req->data_buf, img, num_bytes);
3577 img += num_bytes;
3578 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3579 flash_op, num_bytes);
3580 if (status) {
3581 if (status == ILLEGAL_IOCTL_REQ &&
3582 optype == OPTYPE_PHY_FW)
3583 break;
3584 dev_err(&adapter->pdev->dev,
3585 "cmd to write to flash rom failed.\n");
3586 return status;
3587 }
3588 }
3589 return 0;
3590 }
3591
3592 /* For BE2, BE3 and BE3-R */
3593 static int be_flash_BEx(struct be_adapter *adapter,
3594 const struct firmware *fw,
3595 struct be_dma_mem *flash_cmd,
3596 int num_of_images)
3597
3598 {
3599 int status = 0, i, filehdr_size = 0;
3600 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3601 const u8 *p = fw->data;
3602 const struct flash_comp *pflashcomp;
3603 int num_comp, redboot;
3604 struct flash_section_info *fsec = NULL;
3605
3606 struct flash_comp gen3_flash_types[] = {
3607 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3608 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3609 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3610 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3611 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3612 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3613 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3614 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3615 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3616 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3617 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3618 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3619 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3620 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3621 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3622 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3623 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3624 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3625 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3626 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3627 };
3628
3629 struct flash_comp gen2_flash_types[] = {
3630 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3631 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3632 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3633 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3634 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3635 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3636 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3637 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3638 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3639 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3640 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3641 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3642 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3643 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3644 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3645 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3646 };
3647
3648 if (BE3_chip(adapter)) {
3649 pflashcomp = gen3_flash_types;
3650 filehdr_size = sizeof(struct flash_file_hdr_g3);
3651 num_comp = ARRAY_SIZE(gen3_flash_types);
3652 } else {
3653 pflashcomp = gen2_flash_types;
3654 filehdr_size = sizeof(struct flash_file_hdr_g2);
3655 num_comp = ARRAY_SIZE(gen2_flash_types);
3656 }
3657
3658 	/* Get flash section info */
3659 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3660 if (!fsec) {
3661 dev_err(&adapter->pdev->dev,
3662 "Invalid Cookie. UFI corrupted ?\n");
3663 return -1;
3664 }
3665 for (i = 0; i < num_comp; i++) {
3666 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3667 continue;
3668
3669 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3670 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3671 continue;
3672
3673 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3674 !phy_flashing_required(adapter))
3675 continue;
3676
3677 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3678 redboot = be_flash_redboot(adapter, fw->data,
3679 pflashcomp[i].offset, pflashcomp[i].size,
3680 filehdr_size + img_hdrs_size);
3681 if (!redboot)
3682 continue;
3683 }
3684
3685 p = fw->data;
3686 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3687 if (p + pflashcomp[i].size > fw->data + fw->size)
3688 return -1;
3689
3690 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3691 pflashcomp[i].size);
3692 if (status) {
3693 dev_err(&adapter->pdev->dev,
3694 "Flashing section type %d failed.\n",
3695 pflashcomp[i].img_type);
3696 return status;
3697 }
3698 }
3699 return 0;
3700 }
3701
3702 static int be_flash_skyhawk(struct be_adapter *adapter,
3703 const struct firmware *fw,
3704 struct be_dma_mem *flash_cmd, int num_of_images)
3705 {
3706 int status = 0, i, filehdr_size = 0;
3707 int img_offset, img_size, img_optype, redboot;
3708 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3709 const u8 *p = fw->data;
3710 struct flash_section_info *fsec = NULL;
3711
3712 filehdr_size = sizeof(struct flash_file_hdr_g3);
3713 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3714 if (!fsec) {
3715 dev_err(&adapter->pdev->dev,
3716 "Invalid Cookie. UFI corrupted ?\n");
3717 return -1;
3718 }
3719
3720 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3721 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3722 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3723
3724 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3725 case IMAGE_FIRMWARE_iSCSI:
3726 img_optype = OPTYPE_ISCSI_ACTIVE;
3727 break;
3728 case IMAGE_BOOT_CODE:
3729 img_optype = OPTYPE_REDBOOT;
3730 break;
3731 case IMAGE_OPTION_ROM_ISCSI:
3732 img_optype = OPTYPE_BIOS;
3733 break;
3734 case IMAGE_OPTION_ROM_PXE:
3735 img_optype = OPTYPE_PXE_BIOS;
3736 break;
3737 case IMAGE_OPTION_ROM_FCoE:
3738 img_optype = OPTYPE_FCOE_BIOS;
3739 break;
3740 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3741 img_optype = OPTYPE_ISCSI_BACKUP;
3742 break;
3743 case IMAGE_NCSI:
3744 img_optype = OPTYPE_NCSI_FW;
3745 break;
3746 default:
3747 continue;
3748 }
3749
3750 if (img_optype == OPTYPE_REDBOOT) {
3751 redboot = be_flash_redboot(adapter, fw->data,
3752 img_offset, img_size,
3753 filehdr_size + img_hdrs_size);
3754 if (!redboot)
3755 continue;
3756 }
3757
3758 p = fw->data;
3759 p += filehdr_size + img_offset + img_hdrs_size;
3760 if (p + img_size > fw->data + fw->size)
3761 return -1;
3762
3763 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3764 if (status) {
3765 dev_err(&adapter->pdev->dev,
3766 "Flashing section type %d failed.\n",
3767 fsec->fsec_entry[i].type);
3768 return status;
3769 }
3770 }
3771 return 0;
3772 }
3773
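/* Download firmware on Lancer: the image is streamed to the "/prg"
 * object in 32KB chunks via write-object commands, committed with a
 * zero-length write, and the adapter is reset when FW reports that a
 * reset is needed to activate the new image.
 */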
3774 static int lancer_fw_download(struct be_adapter *adapter,
3775 const struct firmware *fw)
3776 {
3777 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3778 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3779 struct be_dma_mem flash_cmd;
3780 const u8 *data_ptr = NULL;
3781 u8 *dest_image_ptr = NULL;
3782 size_t image_size = 0;
3783 u32 chunk_size = 0;
3784 u32 data_written = 0;
3785 u32 offset = 0;
3786 int status = 0;
3787 u8 add_status = 0;
3788 u8 change_status;
3789
3790 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3791 dev_err(&adapter->pdev->dev,
3792 "FW Image not properly aligned. "
3793 "Length must be 4 byte aligned.\n");
3794 status = -EINVAL;
3795 goto lancer_fw_exit;
3796 }
3797
3798 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3799 + LANCER_FW_DOWNLOAD_CHUNK;
3800 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3801 &flash_cmd.dma, GFP_KERNEL);
3802 if (!flash_cmd.va) {
3803 status = -ENOMEM;
3804 goto lancer_fw_exit;
3805 }
3806
3807 dest_image_ptr = flash_cmd.va +
3808 sizeof(struct lancer_cmd_req_write_object);
3809 image_size = fw->size;
3810 data_ptr = fw->data;
3811
3812 while (image_size) {
3813 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3814
3815 /* Copy the image chunk content. */
3816 memcpy(dest_image_ptr, data_ptr, chunk_size);
3817
3818 status = lancer_cmd_write_object(adapter, &flash_cmd,
3819 chunk_size, offset,
3820 LANCER_FW_DOWNLOAD_LOCATION,
3821 &data_written, &change_status,
3822 &add_status);
3823 if (status)
3824 break;
3825
3826 offset += data_written;
3827 data_ptr += data_written;
3828 image_size -= data_written;
3829 }
3830
3831 if (!status) {
3832 /* Commit the FW written */
3833 status = lancer_cmd_write_object(adapter, &flash_cmd,
3834 0, offset,
3835 LANCER_FW_DOWNLOAD_LOCATION,
3836 &data_written, &change_status,
3837 &add_status);
3838 }
3839
3840 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3841 flash_cmd.dma);
3842 if (status) {
3843 dev_err(&adapter->pdev->dev,
3844 "Firmware load error. "
3845 "Status code: 0x%x Additional Status: 0x%x\n",
3846 status, add_status);
3847 goto lancer_fw_exit;
3848 }
3849
3850 if (change_status == LANCER_FW_RESET_NEEDED) {
3851 dev_info(&adapter->pdev->dev,
3852 "Resetting adapter to activate new FW\n");
3853 status = lancer_physdev_ctrl(adapter,
3854 PHYSDEV_CONTROL_FW_RESET_MASK);
3855 if (status) {
3856 dev_err(&adapter->pdev->dev,
3857 "Adapter busy for FW reset.\n"
3858 "New FW will not be active.\n");
3859 goto lancer_fw_exit;
3860 }
3861 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3862 dev_err(&adapter->pdev->dev,
3863 "System reboot required for new FW"
3864 " to be active\n");
3865 }
3866
3867 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3868 lancer_fw_exit:
3869 return status;
3870 }
3871
3872 #define UFI_TYPE2 2
3873 #define UFI_TYPE3 3
3874 #define UFI_TYPE3R 10
3875 #define UFI_TYPE4 4
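/* Infer the UFI type from the file header's build character and ASIC
 * revision, and reject a UFI that does not match the adapter generation.
 */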
3876 static int be_get_ufi_type(struct be_adapter *adapter,
3877 struct flash_file_hdr_g3 *fhdr)
3878 {
3879 if (fhdr == NULL)
3880 goto be_get_ufi_exit;
3881
3882 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3883 return UFI_TYPE4;
3884 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3885 if (fhdr->asic_type_rev == 0x10)
3886 return UFI_TYPE3R;
3887 else
3888 return UFI_TYPE3;
3889 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3890 return UFI_TYPE2;
3891
3892 be_get_ufi_exit:
3893 dev_err(&adapter->pdev->dev,
3894 "UFI and Interface are not compatible for flashing\n");
3895 return -1;
3896 }
3897
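/* Flash a BE2/BE3/Skyhawk UFI: choose the flashing routine based on the
 * UFI type, refusing combinations the header rules out (e.g. a BE3 UFI
 * on a BE3-R adapter).
 */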
3898 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3899 {
3900 struct flash_file_hdr_g3 *fhdr3;
3901 struct image_hdr *img_hdr_ptr = NULL;
3902 struct be_dma_mem flash_cmd;
3903 const u8 *p;
3904 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3905
3906 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3907 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3908 &flash_cmd.dma, GFP_KERNEL);
3909 if (!flash_cmd.va) {
3910 status = -ENOMEM;
3911 goto be_fw_exit;
3912 }
3913
3914 p = fw->data;
3915 fhdr3 = (struct flash_file_hdr_g3 *)p;
3916
3917 ufi_type = be_get_ufi_type(adapter, fhdr3);
3918
3919 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3920 for (i = 0; i < num_imgs; i++) {
3921 img_hdr_ptr = (struct image_hdr *)(fw->data +
3922 (sizeof(struct flash_file_hdr_g3) +
3923 i * sizeof(struct image_hdr)));
3924 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3925 switch (ufi_type) {
3926 case UFI_TYPE4:
3927 status = be_flash_skyhawk(adapter, fw,
3928 &flash_cmd, num_imgs);
3929 break;
3930 case UFI_TYPE3R:
3931 status = be_flash_BEx(adapter, fw, &flash_cmd,
3932 num_imgs);
3933 break;
3934 case UFI_TYPE3:
3935 /* Do not flash this ufi on BE3-R cards */
3936 if (adapter->asic_rev < 0x10)
3937 status = be_flash_BEx(adapter, fw,
3938 &flash_cmd,
3939 num_imgs);
3940 else {
3941 status = -1;
3942 dev_err(&adapter->pdev->dev,
3943 "Can't load BE3 UFI on BE3R\n");
3944 }
3945 }
3946 }
3947 }
3948
3949 if (ufi_type == UFI_TYPE2)
3950 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3951 else if (ufi_type == -1)
3952 status = -1;
3953
3954 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3955 flash_cmd.dma);
3956 if (status) {
3957 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3958 goto be_fw_exit;
3959 }
3960
3961 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3962
3963 be_fw_exit:
3964 return status;
3965 }
3966
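/* Firmware-flash entry point: request the firmware file, hand it to the
 * Lancer or BE/Skyhawk download path, and refresh the cached FW version
 * on success. Flashing is refused while the interface is down.
 */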
3967 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3968 {
3969 const struct firmware *fw;
3970 int status;
3971
3972 if (!netif_running(adapter->netdev)) {
3973 dev_err(&adapter->pdev->dev,
3974 "Firmware load not allowed (interface is down)\n");
3975 return -1;
3976 }
3977
3978 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3979 if (status)
3980 goto fw_exit;
3981
3982 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3983
3984 if (lancer_chip(adapter))
3985 status = lancer_fw_download(adapter, fw);
3986 else
3987 status = be_fw_download(adapter, fw);
3988
3989 if (!status)
3990 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3991 adapter->fw_on_flash);
3992
3993 fw_exit:
3994 release_firmware(fw);
3995 return status;
3996 }
3997
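/* ndo_bridge_setlink: program the adapter's switch forwarding mode via
 * be_cmd_set_hsw_config(). Only VEB and VEPA are accepted, and only
 * when SR-IOV is enabled.
 */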
3998 static int be_ndo_bridge_setlink(struct net_device *dev,
3999 struct nlmsghdr *nlh)
4000 {
4001 struct be_adapter *adapter = netdev_priv(dev);
4002 struct nlattr *attr, *br_spec;
4003 int rem;
4004 int status = 0;
4005 u16 mode = 0;
4006
4007 if (!sriov_enabled(adapter))
4008 return -EOPNOTSUPP;
4009
4010 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
4011 
4012 nla_for_each_nested(attr, br_spec, rem) {
4013 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4014 continue;
4015
4016 mode = nla_get_u16(attr);
4017 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4018 return -EINVAL;
4019
4020 status = be_cmd_set_hsw_config(adapter, 0, 0,
4021 adapter->if_handle,
4022 mode == BRIDGE_MODE_VEPA ?
4023 PORT_FWD_TYPE_VEPA :
4024 PORT_FWD_TYPE_VEB);
4025 if (status)
4026 goto err;
4027
4028 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4029 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4030
4031 return status;
4032 }
4033 err:
4034 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4035 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4036
4037 return status;
4038 }
4039
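/* ndo_bridge_getlink: report the current forwarding mode. BEx and
 * Lancer support VEB only; other chips query the mode from FW.
 */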
4040 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4041 struct net_device *dev,
4042 u32 filter_mask)
4043 {
4044 struct be_adapter *adapter = netdev_priv(dev);
4045 int status = 0;
4046 u8 hsw_mode;
4047
4048 if (!sriov_enabled(adapter))
4049 return 0;
4050
4051 /* BE and Lancer chips support VEB mode only */
4052 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4053 hsw_mode = PORT_FWD_TYPE_VEB;
4054 } else {
4055 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4056 adapter->if_handle, &hsw_mode);
4057 if (status)
4058 return 0;
4059 }
4060
4061 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4062 hsw_mode == PORT_FWD_TYPE_VEPA ?
4063 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4064 }
4065
4066 static const struct net_device_ops be_netdev_ops = {
4067 .ndo_open = be_open,
4068 .ndo_stop = be_close,
4069 .ndo_start_xmit = be_xmit,
4070 .ndo_set_rx_mode = be_set_rx_mode,
4071 .ndo_set_mac_address = be_mac_addr_set,
4072 .ndo_change_mtu = be_change_mtu,
4073 .ndo_get_stats64 = be_get_stats64,
4074 .ndo_validate_addr = eth_validate_addr,
4075 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4076 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4077 .ndo_set_vf_mac = be_set_vf_mac,
4078 .ndo_set_vf_vlan = be_set_vf_vlan,
4079 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4080 .ndo_get_vf_config = be_get_vf_config,
4081 #ifdef CONFIG_NET_POLL_CONTROLLER
4082 .ndo_poll_controller = be_netpoll,
4083 #endif
4084 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4085 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4086 #ifdef CONFIG_NET_RX_BUSY_POLL
4087 .ndo_busy_poll = be_busy_poll,
4088 #endif
4089 };
4090
4091 static void be_netdev_init(struct net_device *netdev)
4092 {
4093 struct be_adapter *adapter = netdev_priv(netdev);
4094
4095 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4096 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4097 NETIF_F_HW_VLAN_CTAG_TX;
4098 if (be_multi_rxq(adapter))
4099 netdev->hw_features |= NETIF_F_RXHASH;
4100
4101 netdev->features |= netdev->hw_features |
4102 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4103
4104 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4105 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4106
4107 netdev->priv_flags |= IFF_UNICAST_FLT;
4108
4109 netdev->flags |= IFF_MULTICAST;
4110
4111 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4112
4113 netdev->netdev_ops = &be_netdev_ops;
4114
4115 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4116 }
4117
4118 static void be_unmap_pci_bars(struct be_adapter *adapter)
4119 {
4120 if (adapter->csr)
4121 pci_iounmap(adapter->pdev, adapter->csr);
4122 if (adapter->db)
4123 pci_iounmap(adapter->pdev, adapter->db);
4124 }
4125
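/* Doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 on other PFs. */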
4126 static int db_bar(struct be_adapter *adapter)
4127 {
4128 if (lancer_chip(adapter) || !be_physfn(adapter))
4129 return 0;
4130 else
4131 return 4;
4132 }
4133
4134 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4135 {
4136 if (skyhawk_chip(adapter)) {
4137 adapter->roce_db.size = 4096;
4138 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4139 db_bar(adapter));
4140 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4141 db_bar(adapter));
4142 }
4143 return 0;
4144 }
4145
4146 static int be_map_pci_bars(struct be_adapter *adapter)
4147 {
4148 u8 __iomem *addr;
4149
4150 if (BEx_chip(adapter) && be_physfn(adapter)) {
4151 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4152 if (adapter->csr == NULL)
4153 return -ENOMEM;
4154 }
4155
4156 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4157 if (addr == NULL)
4158 goto pci_map_err;
4159 adapter->db = addr;
4160
4161 be_roce_map_pci_bars(adapter);
4162 return 0;
4163
4164 pci_map_err:
4165 be_unmap_pci_bars(adapter);
4166 return -ENOMEM;
4167 }
4168
4169 static void be_ctrl_cleanup(struct be_adapter *adapter)
4170 {
4171 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4172
4173 be_unmap_pci_bars(adapter);
4174
4175 if (mem->va)
4176 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4177 mem->dma);
4178
4179 mem = &adapter->rx_filter;
4180 if (mem->va)
4181 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4182 mem->dma);
4183 }
4184
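/* Map the PCI BARs and allocate the DMA buffers for the bootstrap
 * mailbox (used 16-byte aligned within the allocation) and the RX-filter
 * command, then initialize the mailbox and MCC locks.
 */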
4185 static int be_ctrl_init(struct be_adapter *adapter)
4186 {
4187 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4188 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4189 struct be_dma_mem *rx_filter = &adapter->rx_filter;
4190 u32 sli_intf;
4191 int status;
4192
4193 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4194 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4195 SLI_INTF_FAMILY_SHIFT;
4196 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4197
4198 status = be_map_pci_bars(adapter);
4199 if (status)
4200 goto done;
4201
4202 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4203 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4204 mbox_mem_alloc->size,
4205 &mbox_mem_alloc->dma,
4206 GFP_KERNEL);
4207 if (!mbox_mem_alloc->va) {
4208 status = -ENOMEM;
4209 goto unmap_pci_bars;
4210 }
4211 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4212 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4213 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4214 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4215
4216 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4217 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4218 rx_filter->size, &rx_filter->dma,
4219 GFP_KERNEL);
4220 if (rx_filter->va == NULL) {
4221 status = -ENOMEM;
4222 goto free_mbox;
4223 }
4224
4225 mutex_init(&adapter->mbox_lock);
4226 spin_lock_init(&adapter->mcc_lock);
4227 spin_lock_init(&adapter->mcc_cq_lock);
4228
4229 init_completion(&adapter->et_cmd_compl);
4230 pci_save_state(adapter->pdev);
4231 return 0;
4232
4233 free_mbox:
4234 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4235 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4236
4237 unmap_pci_bars:
4238 be_unmap_pci_bars(adapter);
4239
4240 done:
4241 return status;
4242 }
4243
4244 static void be_stats_cleanup(struct be_adapter *adapter)
4245 {
4246 struct be_dma_mem *cmd = &adapter->stats_cmd;
4247
4248 if (cmd->va)
4249 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4250 cmd->va, cmd->dma);
4251 }
4252
4253 static int be_stats_init(struct be_adapter *adapter)
4254 {
4255 struct be_dma_mem *cmd = &adapter->stats_cmd;
4256
4257 if (lancer_chip(adapter))
4258 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4259 else if (BE2_chip(adapter))
4260 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4261 else if (BE3_chip(adapter))
4262 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4263 else
4264 /* ALL non-BE ASICs */
4265 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4266
4267 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4268 GFP_KERNEL);
4269 if (cmd->va == NULL)
4270 return -1;
4271 return 0;
4272 }
4273
4274 static void be_remove(struct pci_dev *pdev)
4275 {
4276 struct be_adapter *adapter = pci_get_drvdata(pdev);
4277
4278 if (!adapter)
4279 return;
4280
4281 be_roce_dev_remove(adapter);
4282 be_intr_set(adapter, false);
4283
4284 cancel_delayed_work_sync(&adapter->func_recovery_work);
4285
4286 unregister_netdev(adapter->netdev);
4287
4288 be_clear(adapter);
4289
4290 /* tell fw we're done with firing cmds */
4291 be_cmd_fw_clean(adapter);
4292
4293 be_stats_cleanup(adapter);
4294
4295 be_ctrl_cleanup(adapter);
4296
4297 pci_disable_pcie_error_reporting(pdev);
4298
4299 pci_release_regions(pdev);
4300 pci_disable_device(pdev);
4301
4302 free_netdev(adapter->netdev);
4303 }
4304
4305 bool be_is_wol_supported(struct be_adapter *adapter)
4306 {
4307 return (adapter->wol_cap & BE_WOL_CAP) &&
4308 !be_is_wol_excluded(adapter);
4309 }
4310
4311 static int be_get_initial_config(struct be_adapter *adapter)
4312 {
4313 int status, level;
4314
4315 status = be_cmd_get_cntl_attributes(adapter);
4316 if (status)
4317 return status;
4318
4319 status = be_cmd_get_acpi_wol_cap(adapter);
4320 if (status) {
4321 /* in case of a failure to get wol capabilities
4322 * check the exclusion list to determine WOL capability */
4323 if (!be_is_wol_excluded(adapter))
4324 adapter->wol_cap |= BE_WOL_CAP;
4325 }
4326
4327 if (be_is_wol_supported(adapter))
4328 adapter->wol = true;
4329
4330 /* Must be a power of 2 or else MODULO will BUG_ON */
4331 adapter->be_get_temp_freq = 64;
4332
4333 if (BEx_chip(adapter)) {
4334 level = be_cmd_get_fw_log_level(adapter);
4335 adapter->msg_enable =
4336 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4337 }
4338
4339 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4340 return 0;
4341 }
4342
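/* Lancer error recovery: once the chip is ready again, tear down and
 * re-create all resources and re-open the interface if it was running.
 */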
4343 static int lancer_recover_func(struct be_adapter *adapter)
4344 {
4345 struct device *dev = &adapter->pdev->dev;
4346 int status;
4347
4348 status = lancer_test_and_set_rdy_state(adapter);
4349 if (status)
4350 goto err;
4351
4352 if (netif_running(adapter->netdev))
4353 be_close(adapter->netdev);
4354
4355 be_clear(adapter);
4356
4357 be_clear_all_error(adapter);
4358
4359 status = be_setup(adapter);
4360 if (status)
4361 goto err;
4362
4363 if (netif_running(adapter->netdev)) {
4364 status = be_open(adapter->netdev);
4365 if (status)
4366 goto err;
4367 }
4368
4369 dev_err(dev, "Adapter recovery successful\n");
4370 return 0;
4371 err:
4372 if (status == -EAGAIN)
4373 dev_err(dev, "Waiting for resource provisioning\n");
4374 else
4375 dev_err(dev, "Adapter recovery failed\n");
4376
4377 return status;
4378 }
4379
4380 static void be_func_recovery_task(struct work_struct *work)
4381 {
4382 struct be_adapter *adapter =
4383 container_of(work, struct be_adapter, func_recovery_work.work);
4384 int status = 0;
4385
4386 be_detect_error(adapter);
4387
4388 if (adapter->hw_error && lancer_chip(adapter)) {
4389
4390 rtnl_lock();
4391 netif_device_detach(adapter->netdev);
4392 rtnl_unlock();
4393
4394 status = lancer_recover_func(adapter);
4395 if (!status)
4396 netif_device_attach(adapter->netdev);
4397 }
4398
4399 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4400 * no need to attempt further recovery.
4401 */
4402 if (!status || status == -EAGAIN)
4403 schedule_delayed_work(&adapter->func_recovery_work,
4404 msecs_to_jiffies(1000));
4405 }
4406
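/* Periodic (1s) housekeeping: reap MCC completions while the interface
 * is down, issue stats and die-temperature queries, replenish RX queues
 * that starved on allocation failures and update EQ delays.
 */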
4407 static void be_worker(struct work_struct *work)
4408 {
4409 struct be_adapter *adapter =
4410 container_of(work, struct be_adapter, work.work);
4411 struct be_rx_obj *rxo;
4412 int i;
4413
4414 /* when interrupts are not yet enabled, just reap any pending
4415 * mcc completions */
4416 if (!netif_running(adapter->netdev)) {
4417 local_bh_disable();
4418 be_process_mcc(adapter);
4419 local_bh_enable();
4420 goto reschedule;
4421 }
4422
4423 if (!adapter->stats_cmd_sent) {
4424 if (lancer_chip(adapter))
4425 lancer_cmd_get_pport_stats(adapter,
4426 &adapter->stats_cmd);
4427 else
4428 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4429 }
4430
4431 if (be_physfn(adapter) &&
4432 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4433 be_cmd_get_die_temperature(adapter);
4434
4435 for_all_rx_queues(adapter, rxo, i) {
4436 /* Replenish RX-queues starved due to memory
4437 * allocation failures.
4438 */
4439 if (rxo->rx_post_starved)
4440 be_post_rx_frags(rxo, GFP_KERNEL);
4441 }
4442
4443 be_eqd_update(adapter);
4444
4445 reschedule:
4446 adapter->work_counter++;
4447 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4448 }
4449
4450 /* If any VFs are already enabled, don't FLR the PF */
4451 static bool be_reset_required(struct be_adapter *adapter)
4452 {
4453 return !pci_num_vf(adapter->pdev);
4454 }
4455
4456 static char *mc_name(struct be_adapter *adapter)
4457 {
4458 if (adapter->function_mode & FLEX10_MODE)
4459 return "FLEX10";
4460 else if (adapter->function_mode & VNIC_MODE)
4461 return "vNIC";
4462 else if (adapter->function_mode & UMC_ENABLED)
4463 return "UMC";
4464 else
4465 return "";
4466 }
4467
4468 static inline char *func_name(struct be_adapter *adapter)
4469 {
4470 return be_physfn(adapter) ? "PF" : "VF";
4471 }
4472
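/* PCI probe: enable the device, set the DMA mask, map BARs, sync with
 * FW readiness (issuing a function reset when no VFs are active), set up
 * stats, queues and interrupts via be_setup() and register the netdev.
 */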
4473 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4474 {
4475 int status = 0;
4476 struct be_adapter *adapter;
4477 struct net_device *netdev;
4478 char port_name;
4479
4480 status = pci_enable_device(pdev);
4481 if (status)
4482 goto do_none;
4483
4484 status = pci_request_regions(pdev, DRV_NAME);
4485 if (status)
4486 goto disable_dev;
4487 pci_set_master(pdev);
4488
4489 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4490 if (netdev == NULL) {
4491 status = -ENOMEM;
4492 goto rel_reg;
4493 }
4494 adapter = netdev_priv(netdev);
4495 adapter->pdev = pdev;
4496 pci_set_drvdata(pdev, adapter);
4497 adapter->netdev = netdev;
4498 SET_NETDEV_DEV(netdev, &pdev->dev);
4499
4500 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4501 if (!status) {
4502 netdev->features |= NETIF_F_HIGHDMA;
4503 } else {
4504 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4505 if (status) {
4506 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4507 goto free_netdev;
4508 }
4509 }
4510
4511 if (be_physfn(adapter)) {
4512 status = pci_enable_pcie_error_reporting(pdev);
4513 if (!status)
4514 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4515 }
4516
4517 status = be_ctrl_init(adapter);
4518 if (status)
4519 goto free_netdev;
4520
4521 /* sync up with fw's ready state */
4522 if (be_physfn(adapter)) {
4523 status = be_fw_wait_ready(adapter);
4524 if (status)
4525 goto ctrl_clean;
4526 }
4527
4528 if (be_reset_required(adapter)) {
4529 status = be_cmd_reset_function(adapter);
4530 if (status)
4531 goto ctrl_clean;
4532
4533 /* Wait for interrupts to quiesce after an FLR */
4534 msleep(100);
4535 }
4536
4537 /* Allow interrupts for other ULPs running on NIC function */
4538 be_intr_set(adapter, true);
4539
4540 /* tell fw we're ready to fire cmds */
4541 status = be_cmd_fw_init(adapter);
4542 if (status)
4543 goto ctrl_clean;
4544
4545 status = be_stats_init(adapter);
4546 if (status)
4547 goto ctrl_clean;
4548
4549 status = be_get_initial_config(adapter);
4550 if (status)
4551 goto stats_clean;
4552
4553 INIT_DELAYED_WORK(&adapter->work, be_worker);
4554 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4555 adapter->rx_fc = adapter->tx_fc = true;
4556
4557 status = be_setup(adapter);
4558 if (status)
4559 goto stats_clean;
4560
4561 be_netdev_init(netdev);
4562 status = register_netdev(netdev);
4563 if (status != 0)
4564 goto unsetup;
4565
4566 be_roce_dev_add(adapter);
4567
4568 schedule_delayed_work(&adapter->func_recovery_work,
4569 msecs_to_jiffies(1000));
4570
4571 be_cmd_query_port_name(adapter, &port_name);
4572
4573 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4574 func_name(adapter), mc_name(adapter), port_name);
4575
4576 return 0;
4577
4578 unsetup:
4579 be_clear(adapter);
4580 stats_clean:
4581 be_stats_cleanup(adapter);
4582 ctrl_clean:
4583 be_ctrl_cleanup(adapter);
4584 free_netdev:
4585 free_netdev(netdev);
4586 rel_reg:
4587 pci_release_regions(pdev);
4588 disable_dev:
4589 pci_disable_device(pdev);
4590 do_none:
4591 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4592 return status;
4593 }
4594
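/* Legacy PM suspend: optionally arm WoL, quiesce interrupts and the
 * recovery worker, close the interface and tear down resources before
 * entering the requested power state; be_resume() reverses these steps.
 */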
4595 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4596 {
4597 struct be_adapter *adapter = pci_get_drvdata(pdev);
4598 struct net_device *netdev = adapter->netdev;
4599
4600 if (adapter->wol)
4601 be_setup_wol(adapter, true);
4602
4603 be_intr_set(adapter, false);
4604 cancel_delayed_work_sync(&adapter->func_recovery_work);
4605
4606 netif_device_detach(netdev);
4607 if (netif_running(netdev)) {
4608 rtnl_lock();
4609 be_close(netdev);
4610 rtnl_unlock();
4611 }
4612 be_clear(adapter);
4613
4614 pci_save_state(pdev);
4615 pci_disable_device(pdev);
4616 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4617 return 0;
4618 }
4619
4620 static int be_resume(struct pci_dev *pdev)
4621 {
4622 int status = 0;
4623 struct be_adapter *adapter = pci_get_drvdata(pdev);
4624 struct net_device *netdev = adapter->netdev;
4625
4626 netif_device_detach(netdev);
4627
4628 status = pci_enable_device(pdev);
4629 if (status)
4630 return status;
4631
4632 pci_set_power_state(pdev, PCI_D0);
4633 pci_restore_state(pdev);
4634
4635 status = be_fw_wait_ready(adapter);
4636 if (status)
4637 return status;
4638
4639 be_intr_set(adapter, true);
4640 /* tell fw we're ready to fire cmds */
4641 status = be_cmd_fw_init(adapter);
4642 if (status)
4643 return status;
4644
4645 be_setup(adapter);
4646 if (netif_running(netdev)) {
4647 rtnl_lock();
4648 be_open(netdev);
4649 rtnl_unlock();
4650 }
4651
4652 schedule_delayed_work(&adapter->func_recovery_work,
4653 msecs_to_jiffies(1000));
4654 netif_device_attach(netdev);
4655
4656 if (adapter->wol)
4657 be_setup_wol(adapter, false);
4658
4659 return 0;
4660 }
4661
4662 /*
4663 * An FLR will stop BE from DMAing any data.
4664 */
4665 static void be_shutdown(struct pci_dev *pdev)
4666 {
4667 struct be_adapter *adapter = pci_get_drvdata(pdev);
4668
4669 if (!adapter)
4670 return;
4671
4672 cancel_delayed_work_sync(&adapter->work);
4673 cancel_delayed_work_sync(&adapter->func_recovery_work);
4674
4675 netif_device_detach(adapter->netdev);
4676
4677 be_cmd_reset_function(adapter);
4678
4679 pci_disable_device(pdev);
4680 }
4681
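/* EEH error handling: on the first error detach the netdev and tear down
 * resources, then request a slot reset unless the failure is permanent.
 * Function 0 waits 30s in case FW is writing a flash debug dump.
 */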
4682 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4683 pci_channel_state_t state)
4684 {
4685 struct be_adapter *adapter = pci_get_drvdata(pdev);
4686 struct net_device *netdev = adapter->netdev;
4687
4688 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4689
4690 if (!adapter->eeh_error) {
4691 adapter->eeh_error = true;
4692
4693 cancel_delayed_work_sync(&adapter->func_recovery_work);
4694
4695 rtnl_lock();
4696 netif_device_detach(netdev);
4697 if (netif_running(netdev))
4698 be_close(netdev);
4699 rtnl_unlock();
4700
4701 be_clear(adapter);
4702 }
4703
4704 if (state == pci_channel_io_perm_failure)
4705 return PCI_ERS_RESULT_DISCONNECT;
4706
4707 pci_disable_device(pdev);
4708
4709 /* The error could cause the FW to trigger a flash debug dump.
4710 * Resetting the card while flash dump is in progress
4711 * can cause it not to recover; wait for it to finish.
4712 * Wait only for first function as it is needed only once per
4713 * adapter.
4714 */
4715 if (pdev->devfn == 0)
4716 ssleep(30);
4717
4718 return PCI_ERS_RESULT_NEED_RESET;
4719 }
4720
4721 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4722 {
4723 struct be_adapter *adapter = pci_get_drvdata(pdev);
4724 int status;
4725
4726 dev_info(&adapter->pdev->dev, "EEH reset\n");
4727
4728 status = pci_enable_device(pdev);
4729 if (status)
4730 return PCI_ERS_RESULT_DISCONNECT;
4731
4732 pci_set_master(pdev);
4733 pci_set_power_state(pdev, PCI_D0);
4734 pci_restore_state(pdev);
4735
4736 /* Check if card is ok and fw is ready */
4737 dev_info(&adapter->pdev->dev,
4738 "Waiting for FW to be ready after EEH reset\n");
4739 status = be_fw_wait_ready(adapter);
4740 if (status)
4741 return PCI_ERS_RESULT_DISCONNECT;
4742
4743 pci_cleanup_aer_uncorrect_error_status(pdev);
4744 be_clear_all_error(adapter);
4745 return PCI_ERS_RESULT_RECOVERED;
4746 }
4747
4748 static void be_eeh_resume(struct pci_dev *pdev)
4749 {
4750 int status = 0;
4751 struct be_adapter *adapter = pci_get_drvdata(pdev);
4752 struct net_device *netdev = adapter->netdev;
4753
4754 dev_info(&adapter->pdev->dev, "EEH resume\n");
4755
4756 pci_save_state(pdev);
4757
4758 status = be_cmd_reset_function(adapter);
4759 if (status)
4760 goto err;
4761
4762 /* tell fw we're ready to fire cmds */
4763 status = be_cmd_fw_init(adapter);
4764 if (status)
4765 goto err;
4766
4767 status = be_setup(adapter);
4768 if (status)
4769 goto err;
4770
4771 if (netif_running(netdev)) {
4772 status = be_open(netdev);
4773 if (status)
4774 goto err;
4775 }
4776
4777 schedule_delayed_work(&adapter->func_recovery_work,
4778 msecs_to_jiffies(1000));
4779 netif_device_attach(netdev);
4780 return;
4781 err:
4782 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4783 }
4784
4785 static const struct pci_error_handlers be_eeh_handlers = {
4786 .error_detected = be_eeh_err_detected,
4787 .slot_reset = be_eeh_reset,
4788 .resume = be_eeh_resume,
4789 };
4790
4791 static struct pci_driver be_driver = {
4792 .name = DRV_NAME,
4793 .id_table = be_dev_ids,
4794 .probe = be_probe,
4795 .remove = be_remove,
4796 .suspend = be_suspend,
4797 .resume = be_resume,
4798 .shutdown = be_shutdown,
4799 .err_handler = &be_eeh_handlers
4800 };
4801
4802 static int __init be_init_module(void)
4803 {
4804 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4805 rx_frag_size != 2048) {
4806 printk(KERN_WARNING DRV_NAME
4807 " : Module param rx_frag_size must be 2048/4096/8192."
4808 " Using 2048\n");
4809 rx_frag_size = 2048;
4810 }
4811
4812 return pci_register_driver(&be_driver);
4813 }
4814 module_init(be_init_module);
4815
4816 static void __exit be_exit_module(void)
4817 {
4818 pci_unregister_driver(&be_driver);
4819 }
4820 module_exit(be_exit_module);