be2net: Fix to use 32-bit stats to report rx_drops_no_fragment
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
111 "NETC",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131 struct be_dma_mem *mem = &q->dma_mem;
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141 {
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va)
151 return -ENOMEM;
152 return 0;
153 }
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157 u32 reg, enabled;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176 int status = 0;
177
178 /* On Lancer, interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188 }
189
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196 wmb();
197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
202 {
203 u32 val = 0;
204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207 wmb();
208 iowrite32(val, adapter->db + txo->db_offset);
209 }
210
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212 bool arm, bool clear_int, u16 num_popped)
213 {
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219 if (adapter->eeh_error)
220 return;
221
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238 if (adapter->eeh_error)
239 return;
240
241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
252 u8 current_mac[ETH_ALEN];
253 u32 pmac_id = adapter->pmac_id[0];
254 bool active_mac = true;
255
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
259 /* For a BE VF, the MAC address is already activated by the PF.
260 * Hence the only operation left is updating netdev->dev_addr.
261 * Update it only if the user passes the same MAC that was used
262 * while configuring the VF MAC from the PF (hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
287 if (status)
288 goto err;
289
290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293 done:
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296 err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
298 return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334 struct be_port_rxf_stats_v0 *port_stats =
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
337
338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
366 drvs->jabber_events = rxf_stats->port1_jabber_events;
367 else
368 drvs->jabber_events = rxf_stats->port0_jabber_events;
369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383 struct be_port_rxf_stats_v1 *port_stats =
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
386
387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
413 drvs->jabber_events = port_stats->jabber_events;
414 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
415 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
416 drvs->forwarded_packets = rxf_stats->forwarded_packets;
417 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
418 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
419 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
420 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
421 }
422
423 static void populate_lancer_stats(struct be_adapter *adapter)
424 {
425
426 struct be_drv_stats *drvs = &adapter->drv_stats;
427 struct lancer_pport_stats *pport_stats =
428 pport_stats_from_cmd(adapter);
429
430 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
431 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
432 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
433 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
434 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
435 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
436 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
437 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
438 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
439 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
440 drvs->rx_dropped_tcp_length =
441 pport_stats->rx_dropped_invalid_tcp_length;
442 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
443 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
444 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
445 drvs->rx_dropped_header_too_small =
446 pport_stats->rx_dropped_header_too_small;
447 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
448 drvs->rx_address_filtered =
449 pport_stats->rx_address_filtered +
450 pport_stats->rx_vlan_filtered;
451 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
452 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
453 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
454 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
455 drvs->jabber_events = pport_stats->rx_jabbers;
456 drvs->forwarded_packets = pport_stats->num_forwards_lo;
457 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
458 drvs->rx_drops_too_many_frags =
459 pport_stats->rx_drops_too_many_frags_lo;
460 }
461
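/* On BEx chips the per-queue rx_drops_no_fragments counter in the erx HW
 * block is only 16 bits wide and wraps at 65535, so the driver folds each
 * new reading into a running 32-bit value with the helper below.
 * Worked example (illustrative values): if *acc == 0x0001fff0 and the HW
 * now reports 0x0005, then val < lo(*acc) signals a wrap and
 * newacc = hi(*acc) + val + 65536 = 0x00010005 + 0x10000 = 0x00020005.
 */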
462 static void accumulate_16bit_val(u32 *acc, u16 val)
463 {
464 #define lo(x) (x & 0xFFFF)
465 #define hi(x) (x & 0xFFFF0000)
466 bool wrapped = val < lo(*acc);
467 u32 newacc = hi(*acc) + val;
468
469 if (wrapped)
470 newacc += 65536;
471 ACCESS_ONCE(*acc) = newacc;
472 }
473
474 void populate_erx_stats(struct be_adapter *adapter,
475 struct be_rx_obj *rxo,
476 u32 erx_stat)
477 {
478 if (!BEx_chip(adapter))
479 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
480 else
481 /* the erx HW counter below can actually wrap around after
482 * 65535; the driver accumulates it into a 32-bit value
483 */
484 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
485 (u16)erx_stat);
486 }
487
488 void be_parse_stats(struct be_adapter *adapter)
489 {
490 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
491 struct be_rx_obj *rxo;
492 int i;
493 u32 erx_stat;
494
495 if (lancer_chip(adapter)) {
496 populate_lancer_stats(adapter);
497 } else {
498 if (BE2_chip(adapter))
499 populate_be_v0_stats(adapter);
500 else
501 /* for BE3 and Skyhawk */
502 populate_be_v1_stats(adapter);
503
504 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
505 for_all_rx_queues(adapter, rxo, i) {
506 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
507 populate_erx_stats(adapter, rxo, erx_stat);
508 }
509 }
510 }
511
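/* ndo_get_stats64 handler: per-queue byte/packet totals are read under the
 * u64_stats seqcount (fetch_begin/fetch_retry) so that a 64-bit counter
 * being updated in softirq context is never observed half-written on 32-bit
 * hosts; the remaining error counters come from adapter->drv_stats, which
 * be_parse_stats() fills from the most recent firmware stats query.
 */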
512 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
513 struct rtnl_link_stats64 *stats)
514 {
515 struct be_adapter *adapter = netdev_priv(netdev);
516 struct be_drv_stats *drvs = &adapter->drv_stats;
517 struct be_rx_obj *rxo;
518 struct be_tx_obj *txo;
519 u64 pkts, bytes;
520 unsigned int start;
521 int i;
522
523 for_all_rx_queues(adapter, rxo, i) {
524 const struct be_rx_stats *rx_stats = rx_stats(rxo);
525 do {
526 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
527 pkts = rx_stats(rxo)->rx_pkts;
528 bytes = rx_stats(rxo)->rx_bytes;
529 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
530 stats->rx_packets += pkts;
531 stats->rx_bytes += bytes;
532 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
533 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
534 rx_stats(rxo)->rx_drops_no_frags;
535 }
536
537 for_all_tx_queues(adapter, txo, i) {
538 const struct be_tx_stats *tx_stats = tx_stats(txo);
539 do {
540 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
541 pkts = tx_stats(txo)->tx_pkts;
542 bytes = tx_stats(txo)->tx_bytes;
543 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
544 stats->tx_packets += pkts;
545 stats->tx_bytes += bytes;
546 }
547
548 /* bad pkts received */
549 stats->rx_errors = drvs->rx_crc_errors +
550 drvs->rx_alignment_symbol_errors +
551 drvs->rx_in_range_errors +
552 drvs->rx_out_range_errors +
553 drvs->rx_frame_too_long +
554 drvs->rx_dropped_too_small +
555 drvs->rx_dropped_too_short +
556 drvs->rx_dropped_header_too_small +
557 drvs->rx_dropped_tcp_length +
558 drvs->rx_dropped_runt;
559
560 /* detailed rx errors */
561 stats->rx_length_errors = drvs->rx_in_range_errors +
562 drvs->rx_out_range_errors +
563 drvs->rx_frame_too_long;
564
565 stats->rx_crc_errors = drvs->rx_crc_errors;
566
567 /* frame alignment errors */
568 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
569
570 /* receiver fifo overrun */
571 /* drops_no_pbuf is not per i/f, it's per BE card */
572 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
573 drvs->rx_input_fifo_overflow_drop +
574 drvs->rx_drops_no_pbuf;
575 return stats;
576 }
577
578 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
579 {
580 struct net_device *netdev = adapter->netdev;
581
582 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
583 netif_carrier_off(netdev);
584 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
585 }
586
587 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
588 netif_carrier_on(netdev);
589 else
590 netif_carrier_off(netdev);
591 }
592
593 static void be_tx_stats_update(struct be_tx_obj *txo,
594 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
595 {
596 struct be_tx_stats *stats = tx_stats(txo);
597
598 u64_stats_update_begin(&stats->sync);
599 stats->tx_reqs++;
600 stats->tx_wrbs += wrb_cnt;
601 stats->tx_bytes += copied;
602 stats->tx_pkts += (gso_segs ? gso_segs : 1);
603 if (stopped)
604 stats->tx_stops++;
605 u64_stats_update_end(&stats->sync);
606 }
607
608 /* Determine number of WRB entries needed to xmit data in an skb */
609 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
610 bool *dummy)
611 {
612 int cnt = (skb->len > skb->data_len);
613
614 cnt += skb_shinfo(skb)->nr_frags;
615
616 /* to account for hdr wrb */
617 cnt++;
618 if (lancer_chip(adapter) || !(cnt & 1)) {
619 *dummy = false;
620 } else {
621 /* add a dummy to make it an even num */
622 cnt++;
623 *dummy = true;
624 }
625 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
626 return cnt;
627 }
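/* Example: an skb with a non-empty linear area and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs and no dummy; with a single
 * frag the count would be 3, so on non-Lancer chips a dummy WRB is added to
 * keep the number of WRBs per packet even.
 */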
628
629 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
630 {
631 wrb->frag_pa_hi = upper_32_bits(addr);
632 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
633 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
634 wrb->rsvd0 = 0;
635 }
636
637 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
638 struct sk_buff *skb)
639 {
640 u8 vlan_prio;
641 u16 vlan_tag;
642
643 vlan_tag = vlan_tx_tag_get(skb);
644 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
645 /* If vlan priority provided by OS is NOT in available bmap */
646 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
647 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
648 adapter->recommended_prio;
649
650 return vlan_tag;
651 }
652
653 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
654 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
655 {
656 u16 vlan_tag;
657
658 memset(hdr, 0, sizeof(*hdr));
659
660 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
661
662 if (skb_is_gso(skb)) {
663 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
665 hdr, skb_shinfo(skb)->gso_size);
666 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
667 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
668 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
669 if (is_tcp_pkt(skb))
670 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
671 else if (is_udp_pkt(skb))
672 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
673 }
674
675 if (vlan_tx_tag_present(skb)) {
676 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
677 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
678 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
679 }
680
681 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
682 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
686 }
687
688 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
689 bool unmap_single)
690 {
691 dma_addr_t dma;
692
693 be_dws_le_to_cpu(wrb, sizeof(*wrb));
694
695 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
696 if (wrb->frag_len) {
697 if (unmap_single)
698 dma_unmap_single(dev, dma, wrb->frag_len,
699 DMA_TO_DEVICE);
700 else
701 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
702 }
703 }
704
705 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
706 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
707 bool skip_hw_vlan)
708 {
709 dma_addr_t busaddr;
710 int i, copied = 0;
711 struct device *dev = &adapter->pdev->dev;
712 struct sk_buff *first_skb = skb;
713 struct be_eth_wrb *wrb;
714 struct be_eth_hdr_wrb *hdr;
715 bool map_single = false;
716 u16 map_head;
717
718 hdr = queue_head_node(txq);
719 queue_head_inc(txq);
720 map_head = txq->head;
721
722 if (skb->len > skb->data_len) {
723 int len = skb_headlen(skb);
724 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
725 if (dma_mapping_error(dev, busaddr))
726 goto dma_err;
727 map_single = true;
728 wrb = queue_head_node(txq);
729 wrb_fill(wrb, busaddr, len);
730 be_dws_cpu_to_le(wrb, sizeof(*wrb));
731 queue_head_inc(txq);
732 copied += len;
733 }
734
735 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
736 const struct skb_frag_struct *frag =
737 &skb_shinfo(skb)->frags[i];
738 busaddr = skb_frag_dma_map(dev, frag, 0,
739 skb_frag_size(frag), DMA_TO_DEVICE);
740 if (dma_mapping_error(dev, busaddr))
741 goto dma_err;
742 wrb = queue_head_node(txq);
743 wrb_fill(wrb, busaddr, skb_frag_size(frag));
744 be_dws_cpu_to_le(wrb, sizeof(*wrb));
745 queue_head_inc(txq);
746 copied += skb_frag_size(frag);
747 }
748
749 if (dummy_wrb) {
750 wrb = queue_head_node(txq);
751 wrb_fill(wrb, 0, 0);
752 be_dws_cpu_to_le(wrb, sizeof(*wrb));
753 queue_head_inc(txq);
754 }
755
756 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
757 be_dws_cpu_to_le(hdr, sizeof(*hdr));
758
759 return copied;
760 dma_err:
761 txq->head = map_head;
762 while (copied) {
763 wrb = queue_head_node(txq);
764 unmap_tx_frag(dev, wrb, map_single);
765 map_single = false;
766 copied -= wrb->frag_len;
767 queue_head_inc(txq);
768 }
769 return 0;
770 }
771
772 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
773 struct sk_buff *skb,
774 bool *skip_hw_vlan)
775 {
776 u16 vlan_tag = 0;
777
778 skb = skb_share_check(skb, GFP_ATOMIC);
779 if (unlikely(!skb))
780 return skb;
781
782 if (vlan_tx_tag_present(skb)) {
783 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
784 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
785 if (skb)
786 skb->vlan_tci = 0;
787 }
788
789 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
790 if (!vlan_tag)
791 vlan_tag = adapter->pvid;
792 if (skip_hw_vlan)
793 *skip_hw_vlan = true;
794 }
795
796 if (vlan_tag) {
797 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
798 if (unlikely(!skb))
799 return skb;
800
801 skb->vlan_tci = 0;
802 }
803
804 /* Insert the outer VLAN, if any */
805 if (adapter->qnq_vid) {
806 vlan_tag = adapter->qnq_vid;
807 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
808 if (unlikely(!skb))
809 return skb;
810 if (skip_hw_vlan)
811 *skip_hw_vlan = true;
812 }
813
814 return skb;
815 }
816
817 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818 {
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837 }
838
839 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840 {
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842 }
843
844 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
845 {
846 return BE3_chip(adapter) &&
847 be_ipv6_exthdr_check(skb);
848 }
849
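/* Transmit entry point. Depending on chip and packet, the driver may trim
 * padded VLAN+IPv4 runts, insert the VLAN tag in software and/or ask the HW
 * to skip tagging (UMC mode, qnq, csum and ipv6 workarounds), drop packets
 * that could trigger the ipv6 VLAN-tagging lockup, and finally carve the skb
 * into WRBs and ring the TX doorbell.
 */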
850 static netdev_tx_t be_xmit(struct sk_buff *skb,
851 struct net_device *netdev)
852 {
853 struct be_adapter *adapter = netdev_priv(netdev);
854 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
855 struct be_queue_info *txq = &txo->q;
856 struct iphdr *ip = NULL;
857 u32 wrb_cnt = 0, copied = 0;
858 u32 start = txq->head, eth_hdr_len;
859 bool dummy_wrb, stopped = false;
860 bool skip_hw_vlan = false;
861 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
862
863 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
864 VLAN_ETH_HLEN : ETH_HLEN;
865
866 /* For padded packets, BE HW modifies tot_len field in IP header
867 * incorrectly when the VLAN tag is inserted by HW.
868 */
869 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
870 ip = (struct iphdr *)ip_hdr(skb);
871 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
872 }
873
874 /* If vlan tag is already inlined in the packet, skip HW VLAN
875 * tagging in UMC mode
876 */
877 if ((adapter->function_mode & UMC_ENABLED) &&
878 veh->h_vlan_proto == htons(ETH_P_8021Q))
879 skip_hw_vlan = true;
880
881 /* HW has a bug wherein it will calculate CSUM for VLAN
882 * pkts even though checksum offload is not requested.
883 * Manually insert the VLAN in the pkt.
884 */
885 if (skb->ip_summed != CHECKSUM_PARTIAL &&
886 vlan_tx_tag_present(skb)) {
887 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
888 if (unlikely(!skb))
889 goto tx_drop;
890 }
891
892 /* HW may lockup when VLAN HW tagging is requested on
893 * certain ipv6 packets. Drop such pkts if the HW workaround to
894 * skip HW tagging is not enabled by FW.
895 */
896 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
897 (adapter->pvid || adapter->qnq_vid) &&
898 !qnq_async_evt_rcvd(adapter)))
899 goto tx_drop;
900
901 /* Manual VLAN tag insertion to prevent an
902 * ASIC lockup when the ASIC inserts a VLAN tag into
903 * certain ipv6 packets. Insert VLAN tags in the driver,
904 * and set the event, completion and vlan bits accordingly
905 * in the Tx WRB.
906 */
907 if (be_ipv6_tx_stall_chk(adapter, skb) &&
908 be_vlan_tag_tx_chk(adapter, skb)) {
909 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
910 if (unlikely(!skb))
911 goto tx_drop;
912 }
913
914 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
915
916 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
917 skip_hw_vlan);
918 if (copied) {
919 int gso_segs = skb_shinfo(skb)->gso_segs;
920
921 /* record the sent skb in the sent_skb table */
922 BUG_ON(txo->sent_skb_list[start]);
923 txo->sent_skb_list[start] = skb;
924
925 /* Ensure txq has space for the next skb; else stop the queue
926 * *BEFORE* ringing the tx doorbell, so that we serialize the
927 * tx compls of the current transmit, which will wake up the queue
928 */
929 atomic_add(wrb_cnt, &txq->used);
930 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
931 txq->len) {
932 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
933 stopped = true;
934 }
935
936 be_txq_notify(adapter, txo, wrb_cnt);
937
938 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
939 } else {
940 txq->head = start;
941 dev_kfree_skb_any(skb);
942 }
943 tx_drop:
944 return NETDEV_TX_OK;
945 }
946
947 static int be_change_mtu(struct net_device *netdev, int new_mtu)
948 {
949 struct be_adapter *adapter = netdev_priv(netdev);
950 if (new_mtu < BE_MIN_MTU ||
951 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
952 (ETH_HLEN + ETH_FCS_LEN))) {
953 dev_info(&adapter->pdev->dev,
954 "MTU must be between %d and %d bytes\n",
955 BE_MIN_MTU,
956 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
957 return -EINVAL;
958 }
959 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
960 netdev->mtu, new_mtu);
961 netdev->mtu = new_mtu;
962 return 0;
963 }
964
965 /*
966 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
967 * If the user configures more, place BE in vlan promiscuous mode.
968 */
969 static int be_vid_config(struct be_adapter *adapter)
970 {
971 u16 vids[BE_NUM_VLANS_SUPPORTED];
972 u16 num = 0, i;
973 int status = 0;
974
975 /* No need to further configure vids if in promiscuous mode */
976 if (adapter->promiscuous)
977 return 0;
978
979 if (adapter->vlans_added > adapter->max_vlans)
980 goto set_vlan_promisc;
981
982 /* Construct VLAN Table to give to HW */
983 for (i = 0; i < VLAN_N_VID; i++)
984 if (adapter->vlan_tag[i])
985 vids[num++] = cpu_to_le16(i);
986
987 status = be_cmd_vlan_config(adapter, adapter->if_handle,
988 vids, num, 1, 0);
989
990 /* Set to VLAN promisc mode as setting VLAN filter failed */
991 if (status) {
992 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
993 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
994 goto set_vlan_promisc;
995 }
996
997 return status;
998
999 set_vlan_promisc:
1000 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1001 NULL, 0, 1, 1);
1002 return status;
1003 }
1004
1005 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1006 {
1007 struct be_adapter *adapter = netdev_priv(netdev);
1008 int status = 0;
1009
1010 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1011 status = -EINVAL;
1012 goto ret;
1013 }
1014
1015 /* Packets with VID 0 are always received by Lancer by default */
1016 if (lancer_chip(adapter) && vid == 0)
1017 goto ret;
1018
1019 adapter->vlan_tag[vid] = 1;
1020 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1021 status = be_vid_config(adapter);
1022
1023 if (!status)
1024 adapter->vlans_added++;
1025 else
1026 adapter->vlan_tag[vid] = 0;
1027 ret:
1028 return status;
1029 }
1030
1031 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1032 {
1033 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0;
1035
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040
1041 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0)
1043 goto ret;
1044
1045 adapter->vlan_tag[vid] = 0;
1046 if (adapter->vlans_added <= adapter->max_vlans)
1047 status = be_vid_config(adapter);
1048
1049 if (!status)
1050 adapter->vlans_added--;
1051 else
1052 adapter->vlan_tag[vid] = 1;
1053 ret:
1054 return status;
1055 }
1056
1057 static void be_set_rx_mode(struct net_device *netdev)
1058 {
1059 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status;
1061
1062 if (netdev->flags & IFF_PROMISC) {
1063 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1064 adapter->promiscuous = true;
1065 goto done;
1066 }
1067
1068 /* BE was previously in promiscuous mode; disable it */
1069 if (adapter->promiscuous) {
1070 adapter->promiscuous = false;
1071 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1072
1073 if (adapter->vlans_added)
1074 be_vid_config(adapter);
1075 }
1076
1077 /* Enable multicast promisc if num configured exceeds what we support */
1078 if (netdev->flags & IFF_ALLMULTI ||
1079 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1080 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1081 goto done;
1082 }
1083
1084 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1085 struct netdev_hw_addr *ha;
1086 int i = 1; /* First slot is claimed by the Primary MAC */
1087
1088 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1089 be_cmd_pmac_del(adapter, adapter->if_handle,
1090 adapter->pmac_id[i], 0);
1091 }
1092
1093 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1094 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1095 adapter->promiscuous = true;
1096 goto done;
1097 }
1098
1099 netdev_for_each_uc_addr(ha, adapter->netdev) {
1100 adapter->uc_macs++; /* First slot is for Primary MAC */
1101 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1102 adapter->if_handle,
1103 &adapter->pmac_id[adapter->uc_macs], 0);
1104 }
1105 }
1106
1107 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1108
1109 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1110 if (status) {
1111 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1112 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1113 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1114 }
1115 done:
1116 return;
1117 }
1118
1119 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1120 {
1121 struct be_adapter *adapter = netdev_priv(netdev);
1122 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1123 int status;
1124 bool active_mac = false;
1125 u32 pmac_id;
1126 u8 old_mac[ETH_ALEN];
1127
1128 if (!sriov_enabled(adapter))
1129 return -EPERM;
1130
1131 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1132 return -EINVAL;
1133
1134 if (lancer_chip(adapter)) {
1135 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1136 &pmac_id, vf + 1);
1137 if (!status && active_mac)
1138 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1139 pmac_id, vf + 1);
1140
1141 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1142 } else {
1143 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1144 vf_cfg->pmac_id, vf + 1);
1145
1146 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1147 &vf_cfg->pmac_id, vf + 1);
1148 }
1149
1150 if (status)
1151 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1152 mac, vf);
1153 else
1154 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1155
1156 return status;
1157 }
1158
1159 static int be_get_vf_config(struct net_device *netdev, int vf,
1160 struct ifla_vf_info *vi)
1161 {
1162 struct be_adapter *adapter = netdev_priv(netdev);
1163 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1164
1165 if (!sriov_enabled(adapter))
1166 return -EPERM;
1167
1168 if (vf >= adapter->num_vfs)
1169 return -EINVAL;
1170
1171 vi->vf = vf;
1172 vi->tx_rate = vf_cfg->tx_rate;
1173 vi->vlan = vf_cfg->vlan_tag;
1174 vi->qos = 0;
1175 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1176
1177 return 0;
1178 }
1179
1180 static int be_set_vf_vlan(struct net_device *netdev,
1181 int vf, u16 vlan, u8 qos)
1182 {
1183 struct be_adapter *adapter = netdev_priv(netdev);
1184 int status = 0;
1185
1186 if (!sriov_enabled(adapter))
1187 return -EPERM;
1188
1189 if (vf >= adapter->num_vfs || vlan > 4095)
1190 return -EINVAL;
1191
1192 if (vlan) {
1193 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1194 /* If this is a new value, program it; else skip. */
1195 adapter->vf_cfg[vf].vlan_tag = vlan;
1196
1197 status = be_cmd_set_hsw_config(adapter, vlan,
1198 vf + 1, adapter->vf_cfg[vf].if_handle);
1199 }
1200 } else {
1201 /* Reset Transparent Vlan Tagging. */
1202 adapter->vf_cfg[vf].vlan_tag = 0;
1203 vlan = adapter->vf_cfg[vf].def_vid;
1204 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1205 adapter->vf_cfg[vf].if_handle);
1206 }
1207
1208
1209 if (status)
1210 dev_info(&adapter->pdev->dev,
1211 "VLAN %d config on VF %d failed\n", vlan, vf);
1212 return status;
1213 }
1214
1215 static int be_set_vf_tx_rate(struct net_device *netdev,
1216 int vf, int rate)
1217 {
1218 struct be_adapter *adapter = netdev_priv(netdev);
1219 int status = 0;
1220
1221 if (!sriov_enabled(adapter))
1222 return -EPERM;
1223
1224 if (vf >= adapter->num_vfs)
1225 return -EINVAL;
1226
1227 if (rate < 100 || rate > 10000) {
1228 dev_err(&adapter->pdev->dev,
1229 "tx rate must be between 100 and 10000 Mbps\n");
1230 return -EINVAL;
1231 }
1232
1233 if (lancer_chip(adapter))
1234 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1235 else
1236 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1237
1238 if (status)
1239 dev_err(&adapter->pdev->dev,
1240 "tx rate %d on VF %d failed\n", rate, vf);
1241 else
1242 adapter->vf_cfg[vf].tx_rate = rate;
1243 return status;
1244 }
1245
1246 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1247 {
1248 struct pci_dev *dev, *pdev = adapter->pdev;
1249 int vfs = 0, assigned_vfs = 0, pos;
1250 u16 offset, stride;
1251
1252 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1253 if (!pos)
1254 return 0;
1255 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1256 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1257
1258 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1259 while (dev) {
1260 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1261 vfs++;
1262 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1263 assigned_vfs++;
1264 }
1265 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1266 }
1267 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1268 }
1269
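/* Adaptive interrupt coalescing: roughly once a second the RX packet rate
 * is sampled and the EQ delay is derived from it and clamped to
 * [min_eqd, max_eqd]; e.g. (illustrative) 440,000 pkts/s gives
 * (440000 / 110000) << 3 = 32, while rates low enough to produce a value
 * under 10 disable the delay altogether.
 */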
1270 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1271 {
1272 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1273 ulong now = jiffies;
1274 ulong delta = now - stats->rx_jiffies;
1275 u64 pkts;
1276 unsigned int start, eqd;
1277
1278 if (!eqo->enable_aic) {
1279 eqd = eqo->eqd;
1280 goto modify_eqd;
1281 }
1282
1283 if (eqo->idx >= adapter->num_rx_qs)
1284 return;
1285
1286 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1287
1288 /* Wrapped around */
1289 if (time_before(now, stats->rx_jiffies)) {
1290 stats->rx_jiffies = now;
1291 return;
1292 }
1293
1294 /* Update once a second */
1295 if (delta < HZ)
1296 return;
1297
1298 do {
1299 start = u64_stats_fetch_begin_bh(&stats->sync);
1300 pkts = stats->rx_pkts;
1301 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1302
1303 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1304 stats->rx_pkts_prev = pkts;
1305 stats->rx_jiffies = now;
1306 eqd = (stats->rx_pps / 110000) << 3;
1307 eqd = min(eqd, eqo->max_eqd);
1308 eqd = max(eqd, eqo->min_eqd);
1309 if (eqd < 10)
1310 eqd = 0;
1311
1312 modify_eqd:
1313 if (eqd != eqo->cur_eqd) {
1314 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1315 eqo->cur_eqd = eqd;
1316 }
1317 }
1318
1319 static void be_rx_stats_update(struct be_rx_obj *rxo,
1320 struct be_rx_compl_info *rxcp)
1321 {
1322 struct be_rx_stats *stats = rx_stats(rxo);
1323
1324 u64_stats_update_begin(&stats->sync);
1325 stats->rx_compl++;
1326 stats->rx_bytes += rxcp->pkt_size;
1327 stats->rx_pkts++;
1328 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1329 stats->rx_mcast_pkts++;
1330 if (rxcp->err)
1331 stats->rx_compl_err++;
1332 u64_stats_update_end(&stats->sync);
1333 }
1334
1335 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1336 {
1337 /* L4 checksum is not reliable for non TCP/UDP packets.
1338 * Also ignore ipcksm for ipv6 pkts */
1339 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1340 (rxcp->ip_csum || rxcp->ipv6);
1341 }
1342
1343 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1344 u16 frag_idx)
1345 {
1346 struct be_adapter *adapter = rxo->adapter;
1347 struct be_rx_page_info *rx_page_info;
1348 struct be_queue_info *rxq = &rxo->q;
1349
1350 rx_page_info = &rxo->page_info_tbl[frag_idx];
1351 BUG_ON(!rx_page_info->page);
1352
1353 if (rx_page_info->last_page_user) {
1354 dma_unmap_page(&adapter->pdev->dev,
1355 dma_unmap_addr(rx_page_info, bus),
1356 adapter->big_page_size, DMA_FROM_DEVICE);
1357 rx_page_info->last_page_user = false;
1358 }
1359
1360 atomic_dec(&rxq->used);
1361 return rx_page_info;
1362 }
1363
1364 /* Throw away the data in the Rx completion */
1365 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1366 struct be_rx_compl_info *rxcp)
1367 {
1368 struct be_queue_info *rxq = &rxo->q;
1369 struct be_rx_page_info *page_info;
1370 u16 i, num_rcvd = rxcp->num_rcvd;
1371
1372 for (i = 0; i < num_rcvd; i++) {
1373 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1374 put_page(page_info->page);
1375 memset(page_info, 0, sizeof(*page_info));
1376 index_inc(&rxcp->rxq_idx, rxq->len);
1377 }
1378 }
1379
1380 /*
1381 * skb_fill_rx_data forms a complete skb for an ether frame
1382 * indicated by rxcp.
1383 */
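/* The controller scatters one frame over one or more rx_frag_size buffers.
 * The first fragment is copied into the skb head (entirely for tiny frames,
 * header only otherwise); the remaining fragments are attached as page
 * frags, and consecutive fragments that live in the same physical page are
 * coalesced into a single frag slot.
 */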
1384 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1385 struct be_rx_compl_info *rxcp)
1386 {
1387 struct be_queue_info *rxq = &rxo->q;
1388 struct be_rx_page_info *page_info;
1389 u16 i, j;
1390 u16 hdr_len, curr_frag_len, remaining;
1391 u8 *start;
1392
1393 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1394 start = page_address(page_info->page) + page_info->page_offset;
1395 prefetch(start);
1396
1397 /* Copy data in the first descriptor of this completion */
1398 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1399
1400 skb->len = curr_frag_len;
1401 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1402 memcpy(skb->data, start, curr_frag_len);
1403 /* Complete packet has now been moved to data */
1404 put_page(page_info->page);
1405 skb->data_len = 0;
1406 skb->tail += curr_frag_len;
1407 } else {
1408 hdr_len = ETH_HLEN;
1409 memcpy(skb->data, start, hdr_len);
1410 skb_shinfo(skb)->nr_frags = 1;
1411 skb_frag_set_page(skb, 0, page_info->page);
1412 skb_shinfo(skb)->frags[0].page_offset =
1413 page_info->page_offset + hdr_len;
1414 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1415 skb->data_len = curr_frag_len - hdr_len;
1416 skb->truesize += rx_frag_size;
1417 skb->tail += hdr_len;
1418 }
1419 page_info->page = NULL;
1420
1421 if (rxcp->pkt_size <= rx_frag_size) {
1422 BUG_ON(rxcp->num_rcvd != 1);
1423 return;
1424 }
1425
1426 /* More frags present for this completion */
1427 index_inc(&rxcp->rxq_idx, rxq->len);
1428 remaining = rxcp->pkt_size - curr_frag_len;
1429 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1430 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1431 curr_frag_len = min(remaining, rx_frag_size);
1432
1433 /* Coalesce all frags from the same physical page in one slot */
1434 if (page_info->page_offset == 0) {
1435 /* Fresh page */
1436 j++;
1437 skb_frag_set_page(skb, j, page_info->page);
1438 skb_shinfo(skb)->frags[j].page_offset =
1439 page_info->page_offset;
1440 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1441 skb_shinfo(skb)->nr_frags++;
1442 } else {
1443 put_page(page_info->page);
1444 }
1445
1446 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1447 skb->len += curr_frag_len;
1448 skb->data_len += curr_frag_len;
1449 skb->truesize += rx_frag_size;
1450 remaining -= curr_frag_len;
1451 index_inc(&rxcp->rxq_idx, rxq->len);
1452 page_info->page = NULL;
1453 }
1454 BUG_ON(j > MAX_SKB_FRAGS);
1455 }
1456
1457 /* Process the RX completion indicated by rxcp when GRO is disabled */
1458 static void be_rx_compl_process(struct be_rx_obj *rxo,
1459 struct be_rx_compl_info *rxcp)
1460 {
1461 struct be_adapter *adapter = rxo->adapter;
1462 struct net_device *netdev = adapter->netdev;
1463 struct sk_buff *skb;
1464
1465 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1466 if (unlikely(!skb)) {
1467 rx_stats(rxo)->rx_drops_no_skbs++;
1468 be_rx_compl_discard(rxo, rxcp);
1469 return;
1470 }
1471
1472 skb_fill_rx_data(rxo, skb, rxcp);
1473
1474 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1475 skb->ip_summed = CHECKSUM_UNNECESSARY;
1476 else
1477 skb_checksum_none_assert(skb);
1478
1479 skb->protocol = eth_type_trans(skb, netdev);
1480 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1481 if (netdev->features & NETIF_F_RXHASH)
1482 skb->rxhash = rxcp->rss_hash;
1483
1484
1485 if (rxcp->vlanf)
1486 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1487
1488 netif_receive_skb(skb);
1489 }
1490
1491 /* Process the RX completion indicated by rxcp when GRO is enabled */
1492 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1493 struct be_rx_compl_info *rxcp)
1494 {
1495 struct be_adapter *adapter = rxo->adapter;
1496 struct be_rx_page_info *page_info;
1497 struct sk_buff *skb = NULL;
1498 struct be_queue_info *rxq = &rxo->q;
1499 u16 remaining, curr_frag_len;
1500 u16 i, j;
1501
1502 skb = napi_get_frags(napi);
1503 if (!skb) {
1504 be_rx_compl_discard(rxo, rxcp);
1505 return;
1506 }
1507
1508 remaining = rxcp->pkt_size;
1509 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1510 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1511
1512 curr_frag_len = min(remaining, rx_frag_size);
1513
1514 /* Coalesce all frags from the same physical page in one slot */
1515 if (i == 0 || page_info->page_offset == 0) {
1516 /* First frag or Fresh page */
1517 j++;
1518 skb_frag_set_page(skb, j, page_info->page);
1519 skb_shinfo(skb)->frags[j].page_offset =
1520 page_info->page_offset;
1521 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1522 } else {
1523 put_page(page_info->page);
1524 }
1525 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1526 skb->truesize += rx_frag_size;
1527 remaining -= curr_frag_len;
1528 index_inc(&rxcp->rxq_idx, rxq->len);
1529 memset(page_info, 0, sizeof(*page_info));
1530 }
1531 BUG_ON(j > MAX_SKB_FRAGS);
1532
1533 skb_shinfo(skb)->nr_frags = j + 1;
1534 skb->len = rxcp->pkt_size;
1535 skb->data_len = rxcp->pkt_size;
1536 skb->ip_summed = CHECKSUM_UNNECESSARY;
1537 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1538 if (adapter->netdev->features & NETIF_F_RXHASH)
1539 skb->rxhash = rxcp->rss_hash;
1540
1541 if (rxcp->vlanf)
1542 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1543
1544 napi_gro_frags(napi);
1545 }
1546
1547 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1548 struct be_rx_compl_info *rxcp)
1549 {
1550 rxcp->pkt_size =
1551 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1552 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1553 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1554 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1555 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1556 rxcp->ip_csum =
1557 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1558 rxcp->l4_csum =
1559 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1560 rxcp->ipv6 =
1561 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1562 rxcp->rxq_idx =
1563 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1564 rxcp->num_rcvd =
1565 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1566 rxcp->pkt_type =
1567 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1568 rxcp->rss_hash =
1569 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1570 if (rxcp->vlanf) {
1571 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1572 compl);
1573 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1574 compl);
1575 }
1576 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1577 }
1578
1579 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1580 struct be_rx_compl_info *rxcp)
1581 {
1582 rxcp->pkt_size =
1583 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1584 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1585 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1586 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1587 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1588 rxcp->ip_csum =
1589 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1590 rxcp->l4_csum =
1591 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1592 rxcp->ipv6 =
1593 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1594 rxcp->rxq_idx =
1595 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1596 rxcp->num_rcvd =
1597 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1598 rxcp->pkt_type =
1599 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1600 rxcp->rss_hash =
1601 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1602 if (rxcp->vlanf) {
1603 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1604 compl);
1605 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1606 compl);
1607 }
1608 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1609 }
1610
1611 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1612 {
1613 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1614 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1615 struct be_adapter *adapter = rxo->adapter;
1616
1617 /* For checking the valid bit it is Ok to use either definition as the
1618 * valid bit is at the same position in both v0 and v1 Rx compl */
1619 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1620 return NULL;
1621
1622 rmb();
1623 be_dws_le_to_cpu(compl, sizeof(*compl));
1624
1625 if (adapter->be3_native)
1626 be_parse_rx_compl_v1(compl, rxcp);
1627 else
1628 be_parse_rx_compl_v0(compl, rxcp);
1629
1630 if (rxcp->vlanf) {
1631 /* vlanf could be wrongly set in some cards.
1632 * ignore if vtm is not set */
1633 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1634 rxcp->vlanf = 0;
1635
1636 if (!lancer_chip(adapter))
1637 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1638
1639 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1640 !adapter->vlan_tag[rxcp->vlan_tag])
1641 rxcp->vlanf = 0;
1642 }
1643
1644 /* As the compl has been parsed, reset it; we won't touch it again */
1645 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1646
1647 queue_tail_inc(&rxo->cq);
1648 return rxcp;
1649 }
1650
1651 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1652 {
1653 u32 order = get_order(size);
1654
1655 if (order > 0)
1656 gfp |= __GFP_COMP;
1657 return alloc_pages(gfp, order);
1658 }
1659
1660 /*
1661 * Allocate a page, split it to fragments of size rx_frag_size and post as
1662 * receive buffers to BE
1663 */
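/* Each "big page" is DMA-mapped once and handed out rx_frag_size bytes at a
 * time (e.g. two 2048-byte frags per 4K page with the default rx_frag_size);
 * the frag flagged last_page_user is the one whose consumption triggers the
 * dma_unmap of the whole page.
 */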
1664 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1665 {
1666 struct be_adapter *adapter = rxo->adapter;
1667 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1668 struct be_queue_info *rxq = &rxo->q;
1669 struct page *pagep = NULL;
1670 struct be_eth_rx_d *rxd;
1671 u64 page_dmaaddr = 0, frag_dmaaddr;
1672 u32 posted, page_offset = 0;
1673
1674 page_info = &rxo->page_info_tbl[rxq->head];
1675 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1676 if (!pagep) {
1677 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1678 if (unlikely(!pagep)) {
1679 rx_stats(rxo)->rx_post_fail++;
1680 break;
1681 }
1682 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1683 0, adapter->big_page_size,
1684 DMA_FROM_DEVICE);
1685 page_info->page_offset = 0;
1686 } else {
1687 get_page(pagep);
1688 page_info->page_offset = page_offset + rx_frag_size;
1689 }
1690 page_offset = page_info->page_offset;
1691 page_info->page = pagep;
1692 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1693 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1694
1695 rxd = queue_head_node(rxq);
1696 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1697 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1698
1699 /* Any space left in the current big page for another frag? */
1700 if ((page_offset + rx_frag_size + rx_frag_size) >
1701 adapter->big_page_size) {
1702 pagep = NULL;
1703 page_info->last_page_user = true;
1704 }
1705
1706 prev_page_info = page_info;
1707 queue_head_inc(rxq);
1708 page_info = &rxo->page_info_tbl[rxq->head];
1709 }
1710 if (pagep)
1711 prev_page_info->last_page_user = true;
1712
1713 if (posted) {
1714 atomic_add(posted, &rxq->used);
1715 be_rxq_notify(adapter, rxq->id, posted);
1716 } else if (atomic_read(&rxq->used) == 0) {
1717 /* Let be_worker replenish when memory is available */
1718 rxo->rx_post_starved = true;
1719 }
1720 }
1721
1722 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1723 {
1724 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1725
1726 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1727 return NULL;
1728
1729 rmb();
1730 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1731
1732 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1733
1734 queue_tail_inc(tx_cq);
1735 return txcp;
1736 }
1737
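/* Reclaim one transmitted skb: skip the header WRB at the queue tail, unmap
 * each data WRB up to and including last_index from the completion, free
 * the skb and return the total number of WRB slots (header included) that
 * can be released from the TX queue.
 */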
1738 static u16 be_tx_compl_process(struct be_adapter *adapter,
1739 struct be_tx_obj *txo, u16 last_index)
1740 {
1741 struct be_queue_info *txq = &txo->q;
1742 struct be_eth_wrb *wrb;
1743 struct sk_buff **sent_skbs = txo->sent_skb_list;
1744 struct sk_buff *sent_skb;
1745 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1746 bool unmap_skb_hdr = true;
1747
1748 sent_skb = sent_skbs[txq->tail];
1749 BUG_ON(!sent_skb);
1750 sent_skbs[txq->tail] = NULL;
1751
1752 /* skip header wrb */
1753 queue_tail_inc(txq);
1754
1755 do {
1756 cur_index = txq->tail;
1757 wrb = queue_tail_node(txq);
1758 unmap_tx_frag(&adapter->pdev->dev, wrb,
1759 (unmap_skb_hdr && skb_headlen(sent_skb)));
1760 unmap_skb_hdr = false;
1761
1762 num_wrbs++;
1763 queue_tail_inc(txq);
1764 } while (cur_index != last_index);
1765
1766 kfree_skb(sent_skb);
1767 return num_wrbs;
1768 }
1769
1770 /* Return the number of events in the event queue */
1771 static inline int events_get(struct be_eq_obj *eqo)
1772 {
1773 struct be_eq_entry *eqe;
1774 int num = 0;
1775
1776 do {
1777 eqe = queue_tail_node(&eqo->q);
1778 if (eqe->evt == 0)
1779 break;
1780
1781 rmb();
1782 eqe->evt = 0;
1783 num++;
1784 queue_tail_inc(&eqo->q);
1785 } while (true);
1786
1787 return num;
1788 }
1789
1790 /* Leaves the EQ in a disarmed state */
1791 static void be_eq_clean(struct be_eq_obj *eqo)
1792 {
1793 int num = events_get(eqo);
1794
1795 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796 }
1797
1798 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1799 {
1800 struct be_rx_page_info *page_info;
1801 struct be_queue_info *rxq = &rxo->q;
1802 struct be_queue_info *rx_cq = &rxo->cq;
1803 struct be_rx_compl_info *rxcp;
1804 struct be_adapter *adapter = rxo->adapter;
1805 int flush_wait = 0;
1806 u16 tail;
1807
1808 /* Consume pending rx completions.
1809 * Wait for the flush completion (identified by zero num_rcvd)
1810 * to arrive. Notify CQ even when there are no more CQ entries
1811 * for HW to flush partially coalesced CQ entries.
1812 * In Lancer, there is no need to wait for flush compl.
1813 */
1814 for (;;) {
1815 rxcp = be_rx_compl_get(rxo);
1816 if (rxcp == NULL) {
1817 if (lancer_chip(adapter))
1818 break;
1819
1820 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1821 dev_warn(&adapter->pdev->dev,
1822 "did not receive flush compl\n");
1823 break;
1824 }
1825 be_cq_notify(adapter, rx_cq->id, true, 0);
1826 mdelay(1);
1827 } else {
1828 be_rx_compl_discard(rxo, rxcp);
1829 be_cq_notify(adapter, rx_cq->id, true, 1);
1830 if (rxcp->num_rcvd == 0)
1831 break;
1832 }
1833 }
1834
1835 /* After cleanup, leave the CQ in unarmed state */
1836 be_cq_notify(adapter, rx_cq->id, false, 0);
1837
1838 /* Then free posted rx buffers that were not used */
1839 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1840 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1841 page_info = get_rx_page_info(rxo, tail);
1842 put_page(page_info->page);
1843 memset(page_info, 0, sizeof(*page_info));
1844 }
1845 BUG_ON(atomic_read(&rxq->used));
1846 rxq->tail = rxq->head = 0;
1847 }
1848
1849 static void be_tx_compl_clean(struct be_adapter *adapter)
1850 {
1851 struct be_tx_obj *txo;
1852 struct be_queue_info *txq;
1853 struct be_eth_tx_compl *txcp;
1854 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1855 struct sk_buff *sent_skb;
1856 bool dummy_wrb;
1857 int i, pending_txqs;
1858
1859 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1860 do {
1861 pending_txqs = adapter->num_tx_qs;
1862
1863 for_all_tx_queues(adapter, txo, i) {
1864 txq = &txo->q;
1865 while ((txcp = be_tx_compl_get(&txo->cq))) {
1866 end_idx =
1867 AMAP_GET_BITS(struct amap_eth_tx_compl,
1868 wrb_index, txcp);
1869 num_wrbs += be_tx_compl_process(adapter, txo,
1870 end_idx);
1871 cmpl++;
1872 }
1873 if (cmpl) {
1874 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1875 atomic_sub(num_wrbs, &txq->used);
1876 cmpl = 0;
1877 num_wrbs = 0;
1878 }
1879 if (atomic_read(&txq->used) == 0)
1880 pending_txqs--;
1881 }
1882
1883 if (pending_txqs == 0 || ++timeo > 200)
1884 break;
1885
1886 mdelay(1);
1887 } while (true);
1888
1889 for_all_tx_queues(adapter, txo, i) {
1890 txq = &txo->q;
1891 if (atomic_read(&txq->used))
1892 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1893 atomic_read(&txq->used));
1894
1895 /* free posted tx for which compls will never arrive */
1896 while (atomic_read(&txq->used)) {
1897 sent_skb = txo->sent_skb_list[txq->tail];
1898 end_idx = txq->tail;
1899 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1900 &dummy_wrb);
1901 index_adv(&end_idx, num_wrbs - 1, txq->len);
1902 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1903 atomic_sub(num_wrbs, &txq->used);
1904 }
1905 }
1906 }
1907
1908 static void be_evt_queues_destroy(struct be_adapter *adapter)
1909 {
1910 struct be_eq_obj *eqo;
1911 int i;
1912
1913 for_all_evt_queues(adapter, eqo, i) {
1914 if (eqo->q.created) {
1915 be_eq_clean(eqo);
1916 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1917 }
1918 be_queue_free(adapter, &eqo->q);
1919 }
1920 }
1921
1922 static int be_evt_queues_create(struct be_adapter *adapter)
1923 {
1924 struct be_queue_info *eq;
1925 struct be_eq_obj *eqo;
1926 int i, rc;
1927
1928 adapter->num_evt_qs = num_irqs(adapter);
1929
1930 for_all_evt_queues(adapter, eqo, i) {
1931 eqo->adapter = adapter;
1932 eqo->tx_budget = BE_TX_BUDGET;
1933 eqo->idx = i;
1934 eqo->max_eqd = BE_MAX_EQD;
1935 eqo->enable_aic = true;
1936
1937 eq = &eqo->q;
1938 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1939 sizeof(struct be_eq_entry));
1940 if (rc)
1941 return rc;
1942
1943 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1944 if (rc)
1945 return rc;
1946 }
1947 return 0;
1948 }
1949
1950 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1951 {
1952 struct be_queue_info *q;
1953
1954 q = &adapter->mcc_obj.q;
1955 if (q->created)
1956 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1957 be_queue_free(adapter, q);
1958
1959 q = &adapter->mcc_obj.cq;
1960 if (q->created)
1961 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1962 be_queue_free(adapter, q);
1963 }
1964
1965 /* Must be called only after TX qs are created as MCC shares TX EQ */
1966 static int be_mcc_queues_create(struct be_adapter *adapter)
1967 {
1968 struct be_queue_info *q, *cq;
1969
1970 cq = &adapter->mcc_obj.cq;
1971 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1972 sizeof(struct be_mcc_compl)))
1973 goto err;
1974
1975 /* Use the default EQ for MCC completions */
1976 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1977 goto mcc_cq_free;
1978
1979 q = &adapter->mcc_obj.q;
1980 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1981 goto mcc_cq_destroy;
1982
1983 if (be_cmd_mccq_create(adapter, q, cq))
1984 goto mcc_q_free;
1985
1986 return 0;
1987
1988 mcc_q_free:
1989 be_queue_free(adapter, q);
1990 mcc_cq_destroy:
1991 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1992 mcc_cq_free:
1993 be_queue_free(adapter, cq);
1994 err:
1995 return -1;
1996 }
1997
1998 static void be_tx_queues_destroy(struct be_adapter *adapter)
1999 {
2000 struct be_queue_info *q;
2001 struct be_tx_obj *txo;
2002 u8 i;
2003
2004 for_all_tx_queues(adapter, txo, i) {
2005 q = &txo->q;
2006 if (q->created)
2007 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2008 be_queue_free(adapter, q);
2009
2010 q = &txo->cq;
2011 if (q->created)
2012 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2013 be_queue_free(adapter, q);
2014 }
2015 }
2016
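/* Only a single TX queue is used on BE2 chips, in multi-channel mode,
 * for non-Lancer VFs, or when SR-IOV is going to be enabled on a
 * non-Lancer chip; otherwise use the maximum the function supports.
 */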
2017 static int be_num_txqs_want(struct be_adapter *adapter)
2018 {
2019 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2020 be_is_mc(adapter) ||
2021 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2022 BE2_chip(adapter))
2023 return 1;
2024 else
2025 return adapter->max_tx_queues;
2026 }
2027
2028 static int be_tx_cqs_create(struct be_adapter *adapter)
2029 {
2030 struct be_queue_info *cq, *eq;
2031 int status;
2032 struct be_tx_obj *txo;
2033 u8 i;
2034
2035 adapter->num_tx_qs = be_num_txqs_want(adapter);
2036 if (adapter->num_tx_qs != MAX_TX_QS) {
2037 rtnl_lock();
2038 netif_set_real_num_tx_queues(adapter->netdev,
2039 adapter->num_tx_qs);
2040 rtnl_unlock();
2041 }
2042
2043 for_all_tx_queues(adapter, txo, i) {
2044 cq = &txo->cq;
2045 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046 sizeof(struct be_eth_tx_compl));
2047 if (status)
2048 return status;
2049
2050 /* If num_evt_qs is less than num_tx_qs, then more than
2051 * one txq shares an eq
2052 */
2053 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055 if (status)
2056 return status;
2057 }
2058 return 0;
2059 }
2060
2061 static int be_tx_qs_create(struct be_adapter *adapter)
2062 {
2063 struct be_tx_obj *txo;
2064 int i, status;
2065
2066 for_all_tx_queues(adapter, txo, i) {
2067 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2068 sizeof(struct be_eth_wrb));
2069 if (status)
2070 return status;
2071
2072 status = be_cmd_txq_create(adapter, txo);
2073 if (status)
2074 return status;
2075 }
2076
2077 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2078 adapter->num_tx_qs);
2079 return 0;
2080 }
2081
2082 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2083 {
2084 struct be_queue_info *q;
2085 struct be_rx_obj *rxo;
2086 int i;
2087
2088 for_all_rx_queues(adapter, rxo, i) {
2089 q = &rxo->cq;
2090 if (q->created)
2091 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2092 be_queue_free(adapter, q);
2093 }
2094 }
2095
2096 static int be_rx_cqs_create(struct be_adapter *adapter)
2097 {
2098 struct be_queue_info *eq, *cq;
2099 struct be_rx_obj *rxo;
2100 int rc, i;
2101
2102 /* We'll create as many RSS rings as there are irqs.
2103 * But when there's only one irq there's no use creating RSS rings
2104 */
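/* With multiple irqs one RSS ring is created per irq plus the default
 * non-RSS RXQ; with a single irq only the default RXQ is created.
 */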
2105 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2106 num_irqs(adapter) + 1 : 1;
2107 if (adapter->num_rx_qs != MAX_RX_QS) {
2108 rtnl_lock();
2109 netif_set_real_num_rx_queues(adapter->netdev,
2110 adapter->num_rx_qs);
2111 rtnl_unlock();
2112 }
2113
2114 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2115 for_all_rx_queues(adapter, rxo, i) {
2116 rxo->adapter = adapter;
2117 cq = &rxo->cq;
2118 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2119 sizeof(struct be_eth_rx_compl));
2120 if (rc)
2121 return rc;
2122
2123 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2124 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2125 if (rc)
2126 return rc;
2127 }
2128
2129 dev_info(&adapter->pdev->dev,
2130 "created %d RSS queue(s) and 1 default RX queue\n",
2131 adapter->num_rx_qs - 1);
2132 return 0;
2133 }
2134
2135 static irqreturn_t be_intx(int irq, void *dev)
2136 {
2137 struct be_eq_obj *eqo = dev;
2138 struct be_adapter *adapter = eqo->adapter;
2139 int num_evts = 0;
2140
2141 /* IRQ is not expected when NAPI is scheduled as the EQ
2142 * will not be armed.
2143 * But, this can happen on Lancer INTx where it takes
2144 * a while to de-assert INTx or in BE2 where occasionally
2145 * an interrupt may be raised even when EQ is unarmed.
2146 * If NAPI is already scheduled, then counting & notifying
2147 * events will orphan them.
2148 */
2149 if (napi_schedule_prep(&eqo->napi)) {
2150 num_evts = events_get(eqo);
2151 __napi_schedule(&eqo->napi);
2152 if (num_evts)
2153 eqo->spurious_intr = 0;
2154 }
2155 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2156
2157 /* Return IRQ_HANDLED only for the first spurious intr
2158 * after a valid intr to stop the kernel from branding
2159 * this irq as a bad one!
2160 */
2161 if (num_evts || eqo->spurious_intr++ == 0)
2162 return IRQ_HANDLED;
2163 else
2164 return IRQ_NONE;
2165 }
2166
2167 static irqreturn_t be_msix(int irq, void *dev)
2168 {
2169 struct be_eq_obj *eqo = dev;
2170
2171 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2172 napi_schedule(&eqo->napi);
2173 return IRQ_HANDLED;
2174 }
2175
2176 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2177 {
2178 return (rxcp->tcpf && !rxcp->err) ? true : false;
2179 }
2180
2181 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2182 int budget)
2183 {
2184 struct be_adapter *adapter = rxo->adapter;
2185 struct be_queue_info *rx_cq = &rxo->cq;
2186 struct be_rx_compl_info *rxcp;
2187 u32 work_done;
2188
2189 for (work_done = 0; work_done < budget; work_done++) {
2190 rxcp = be_rx_compl_get(rxo);
2191 if (!rxcp)
2192 break;
2193
2194 /* Is it a flush compl that has no data */
2195 if (unlikely(rxcp->num_rcvd == 0))
2196 goto loop_continue;
2197
2198 /* Discard compls with partial DMA on Lancer B0 */
2199 if (unlikely(!rxcp->pkt_size)) {
2200 be_rx_compl_discard(rxo, rxcp);
2201 goto loop_continue;
2202 }
2203
2204 /* On BE drop pkts that arrive due to imperfect filtering in
2205 * promiscuous mode on some SKUs
2206 */
2207 if (unlikely(rxcp->port != adapter->port_num &&
2208 !lancer_chip(adapter))) {
2209 be_rx_compl_discard(rxo, rxcp);
2210 goto loop_continue;
2211 }
2212
2213 if (do_gro(rxcp))
2214 be_rx_compl_process_gro(rxo, napi, rxcp);
2215 else
2216 be_rx_compl_process(rxo, rxcp);
2217 loop_continue:
2218 be_rx_stats_update(rxo, rxcp);
2219 }
2220
2221 if (work_done) {
2222 be_cq_notify(adapter, rx_cq->id, true, work_done);
2223
2224 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2225 be_post_rx_frags(rxo, GFP_ATOMIC);
2226 }
2227
2228 return work_done;
2229 }
2230
2231 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2232 int budget, int idx)
2233 {
2234 struct be_eth_tx_compl *txcp;
2235 int num_wrbs = 0, work_done;
2236
2237 for (work_done = 0; work_done < budget; work_done++) {
2238 txcp = be_tx_compl_get(&txo->cq);
2239 if (!txcp)
2240 break;
2241 num_wrbs += be_tx_compl_process(adapter, txo,
2242 AMAP_GET_BITS(struct amap_eth_tx_compl,
2243 wrb_index, txcp));
2244 }
2245
2246 if (work_done) {
2247 be_cq_notify(adapter, txo->cq.id, true, work_done);
2248 atomic_sub(num_wrbs, &txo->q.used);
2249
2250 /* As Tx wrbs have been freed up, wake up netdev queue
2251 * if it was stopped due to lack of tx wrbs. */
2252 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2253 atomic_read(&txo->q.used) < txo->q.len / 2) {
2254 netif_wake_subqueue(adapter->netdev, idx);
2255 }
2256
2257 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2258 tx_stats(txo)->tx_compl += work_done;
2259 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2260 }
2261 return (work_done < budget); /* Done */
2262 }
2263
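/* NAPI poll handler: drains TX and RX completions for every queue
 * serviced by this EQ, processes MCC completions on the MCC EQ, and
 * re-arms the EQ only when the budget was not exhausted.
 */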
2264 int be_poll(struct napi_struct *napi, int budget)
2265 {
2266 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2267 struct be_adapter *adapter = eqo->adapter;
2268 int max_work = 0, work, i, num_evts;
2269 bool tx_done;
2270
2271 num_evts = events_get(eqo);
2272
2273 /* Process all TXQs serviced by this EQ */
2274 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2275 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2276 eqo->tx_budget, i);
2277 if (!tx_done)
2278 max_work = budget;
2279 }
2280
2281 /* This loop iterates twice for EQ0, for which completions of the
2282 * last (default) RXQ are also processed. For other EQs the loop
2283 * iterates only once.
2284 */
2285 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2286 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2287 max_work = max(work, max_work);
2288 }
2289
2290 if (is_mcc_eqo(eqo))
2291 be_process_mcc(adapter);
2292
2293 if (max_work < budget) {
2294 napi_complete(napi);
2295 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2296 } else {
2297 /* As we'll continue in polling mode, count and clear events */
2298 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2299 }
2300 return max_work;
2301 }
2302
2303 void be_detect_error(struct be_adapter *adapter)
2304 {
2305 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2306 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2307 u32 i;
2308
2309 if (be_hw_error(adapter))
2310 return;
2311
2312 if (lancer_chip(adapter)) {
2313 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2314 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2315 sliport_err1 = ioread32(adapter->db +
2316 SLIPORT_ERROR1_OFFSET);
2317 sliport_err2 = ioread32(adapter->db +
2318 SLIPORT_ERROR2_OFFSET);
2319 }
2320 } else {
2321 pci_read_config_dword(adapter->pdev,
2322 PCICFG_UE_STATUS_LOW, &ue_lo);
2323 pci_read_config_dword(adapter->pdev,
2324 PCICFG_UE_STATUS_HIGH, &ue_hi);
2325 pci_read_config_dword(adapter->pdev,
2326 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2327 pci_read_config_dword(adapter->pdev,
2328 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2329
2330 ue_lo = (ue_lo & ~ue_lo_mask);
2331 ue_hi = (ue_hi & ~ue_hi_mask);
2332 }
2333
2334 /* On certain platforms BE hardware can indicate spurious UEs.
2335 * In case of a real UE the h/w will stop working on its own anyway,
2336 * so hw_error is not set on UE detection.
2337 */
2338 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2339 adapter->hw_error = true;
2340 dev_err(&adapter->pdev->dev,
2341 "Error detected in the card\n");
2342 }
2343
2344 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2345 dev_err(&adapter->pdev->dev,
2346 "ERR: sliport status 0x%x\n", sliport_status);
2347 dev_err(&adapter->pdev->dev,
2348 "ERR: sliport error1 0x%x\n", sliport_err1);
2349 dev_err(&adapter->pdev->dev,
2350 "ERR: sliport error2 0x%x\n", sliport_err2);
2351 }
2352
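/* Decode each unmasked UE bit using the low/high status description
 * tables so the offending block is named in the log.
 */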
2353 if (ue_lo) {
2354 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2355 if (ue_lo & 1)
2356 dev_err(&adapter->pdev->dev,
2357 "UE: %s bit set\n", ue_status_low_desc[i]);
2358 }
2359 }
2360
2361 if (ue_hi) {
2362 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2363 if (ue_hi & 1)
2364 dev_err(&adapter->pdev->dev,
2365 "UE: %s bit set\n", ue_status_hi_desc[i]);
2366 }
2367 }
2368
2369 }
2370
2371 static void be_msix_disable(struct be_adapter *adapter)
2372 {
2373 if (msix_enabled(adapter)) {
2374 pci_disable_msix(adapter->pdev);
2375 adapter->num_msix_vec = 0;
2376 }
2377 }
2378
2379 static uint be_num_rss_want(struct be_adapter *adapter)
2380 {
2381 u32 num = 0;
2382
2383 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2384 (lancer_chip(adapter) ||
2385 (!sriov_want(adapter) && be_physfn(adapter)))) {
2386 num = adapter->max_rss_queues;
2387 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2388 }
2389 return num;
2390 }
2391
2392 static void be_msix_enable(struct be_adapter *adapter)
2393 {
2394 #define BE_MIN_MSIX_VECTORS 1
2395 int i, status, num_vec, num_roce_vec = 0;
2396 struct device *dev = &adapter->pdev->dev;
2397
2398 /* If RSS queues are not used, need a vec for default RX Q */
2399 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2400 if (be_roce_supported(adapter)) {
2401 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2402 (num_online_cpus() + 1));
2403 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2404 num_vec += num_roce_vec;
2405 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2406 }
2407 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2408
2409 for (i = 0; i < num_vec; i++)
2410 adapter->msix_entries[i].entry = i;
2411
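/* pci_enable_msix() returns 0 on success, a negative errno on failure,
 * or a positive count of the vectors that are actually available; in
 * the latter case retry the request with that smaller count.
 */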
2412 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2413 if (status == 0) {
2414 goto done;
2415 } else if (status >= BE_MIN_MSIX_VECTORS) {
2416 num_vec = status;
2417 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2418 num_vec) == 0)
2419 goto done;
2420 }
2421
2422 dev_warn(dev, "MSIx enable failed\n");
2423 return;
2424 done:
2425 if (be_roce_supported(adapter)) {
2426 if (num_vec > num_roce_vec) {
2427 adapter->num_msix_vec = num_vec - num_roce_vec;
2428 adapter->num_msix_roce_vec =
2429 num_vec - adapter->num_msix_vec;
2430 } else {
2431 adapter->num_msix_vec = num_vec;
2432 adapter->num_msix_roce_vec = 0;
2433 }
2434 } else
2435 adapter->num_msix_vec = num_vec;
2436 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2437 return;
2438 }
2439
2440 static inline int be_msix_vec_get(struct be_adapter *adapter,
2441 struct be_eq_obj *eqo)
2442 {
2443 return adapter->msix_entries[eqo->idx].vector;
2444 }
2445
2446 static int be_msix_register(struct be_adapter *adapter)
2447 {
2448 struct net_device *netdev = adapter->netdev;
2449 struct be_eq_obj *eqo;
2450 int status, i, vec;
2451
2452 for_all_evt_queues(adapter, eqo, i) {
2453 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2454 vec = be_msix_vec_get(adapter, eqo);
2455 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2456 if (status)
2457 goto err_msix;
2458 }
2459
2460 return 0;
2461 err_msix:
2462 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2463 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2464 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2465 status);
2466 be_msix_disable(adapter);
2467 return status;
2468 }
2469
2470 static int be_irq_register(struct be_adapter *adapter)
2471 {
2472 struct net_device *netdev = adapter->netdev;
2473 int status;
2474
2475 if (msix_enabled(adapter)) {
2476 status = be_msix_register(adapter);
2477 if (status == 0)
2478 goto done;
2479 /* INTx is not supported for VF */
2480 if (!be_physfn(adapter))
2481 return status;
2482 }
2483
2484 /* INTx: only the first EQ is used */
2485 netdev->irq = adapter->pdev->irq;
2486 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2487 &adapter->eq_obj[0]);
2488 if (status) {
2489 dev_err(&adapter->pdev->dev,
2490 "INTx request IRQ failed - err %d\n", status);
2491 return status;
2492 }
2493 done:
2494 adapter->isr_registered = true;
2495 return 0;
2496 }
2497
2498 static void be_irq_unregister(struct be_adapter *adapter)
2499 {
2500 struct net_device *netdev = adapter->netdev;
2501 struct be_eq_obj *eqo;
2502 int i;
2503
2504 if (!adapter->isr_registered)
2505 return;
2506
2507 /* INTx */
2508 if (!msix_enabled(adapter)) {
2509 free_irq(netdev->irq, &adapter->eq_obj[0]);
2510 goto done;
2511 }
2512
2513 /* MSIx */
2514 for_all_evt_queues(adapter, eqo, i)
2515 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2516
2517 done:
2518 adapter->isr_registered = false;
2519 }
2520
2521 static void be_rx_qs_destroy(struct be_adapter *adapter)
2522 {
2523 struct be_queue_info *q;
2524 struct be_rx_obj *rxo;
2525 int i;
2526
2527 for_all_rx_queues(adapter, rxo, i) {
2528 q = &rxo->q;
2529 if (q->created) {
2530 be_cmd_rxq_destroy(adapter, q);
2531 /* After the rxq is invalidated, wait for a grace time
2532 * of 1ms for all dma to end and the flush compl to
2533 * arrive
2534 */
2535 mdelay(1);
2536 be_rx_cq_clean(rxo);
2537 }
2538 be_queue_free(adapter, q);
2539 }
2540 }
2541
2542 static int be_close(struct net_device *netdev)
2543 {
2544 struct be_adapter *adapter = netdev_priv(netdev);
2545 struct be_eq_obj *eqo;
2546 int i;
2547
2548 be_roce_dev_close(adapter);
2549
2550 for_all_evt_queues(adapter, eqo, i)
2551 napi_disable(&eqo->napi);
2552
2553 be_async_mcc_disable(adapter);
2554
2555 /* Wait for all pending tx completions to arrive so that
2556 * all tx skbs are freed.
2557 */
2558 be_tx_compl_clean(adapter);
2559
2560 be_rx_qs_destroy(adapter);
2561
2562 for_all_evt_queues(adapter, eqo, i) {
2563 if (msix_enabled(adapter))
2564 synchronize_irq(be_msix_vec_get(adapter, eqo));
2565 else
2566 synchronize_irq(netdev->irq);
2567 be_eq_clean(eqo);
2568 }
2569
2570 be_irq_unregister(adapter);
2571
2572 return 0;
2573 }
2574
2575 static int be_rx_qs_create(struct be_adapter *adapter)
2576 {
2577 struct be_rx_obj *rxo;
2578 int rc, i, j;
2579 u8 rsstable[128];
2580
2581 for_all_rx_queues(adapter, rxo, i) {
2582 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2583 sizeof(struct be_eth_rx_d));
2584 if (rc)
2585 return rc;
2586 }
2587
2588 /* The FW would like the default RXQ to be created first */
2589 rxo = default_rxo(adapter);
2590 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2591 adapter->if_handle, false, &rxo->rss_id);
2592 if (rc)
2593 return rc;
2594
2595 for_all_rss_queues(adapter, rxo, i) {
2596 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2597 rx_frag_size, adapter->if_handle,
2598 true, &rxo->rss_id);
2599 if (rc)
2600 return rc;
2601 }
2602
2603 if (be_multi_rxq(adapter)) {
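/* Build the 128-entry RSS indirection table by spreading the RSS
 * ring ids round-robin; the default (non-RSS) RXQ is not included.
 */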
2604 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2605 for_all_rss_queues(adapter, rxo, i) {
2606 if ((j + i) >= 128)
2607 break;
2608 rsstable[j + i] = rxo->rss_id;
2609 }
2610 }
2611 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2612 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2613
2614 if (!BEx_chip(adapter))
2615 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2616 RSS_ENABLE_UDP_IPV6;
2617
2618 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2619 128);
2620 if (rc) {
2621 adapter->rss_flags = 0;
2622 return rc;
2623 }
2624 }
2625
2626 /* First time posting */
2627 for_all_rx_queues(adapter, rxo, i)
2628 be_post_rx_frags(rxo, GFP_KERNEL);
2629 return 0;
2630 }
2631
2632 static int be_open(struct net_device *netdev)
2633 {
2634 struct be_adapter *adapter = netdev_priv(netdev);
2635 struct be_eq_obj *eqo;
2636 struct be_rx_obj *rxo;
2637 struct be_tx_obj *txo;
2638 u8 link_status;
2639 int status, i;
2640
2641 status = be_rx_qs_create(adapter);
2642 if (status)
2643 goto err;
2644
2645 be_irq_register(adapter);
2646
2647 for_all_rx_queues(adapter, rxo, i)
2648 be_cq_notify(adapter, rxo->cq.id, true, 0);
2649
2650 for_all_tx_queues(adapter, txo, i)
2651 be_cq_notify(adapter, txo->cq.id, true, 0);
2652
2653 be_async_mcc_enable(adapter);
2654
2655 for_all_evt_queues(adapter, eqo, i) {
2656 napi_enable(&eqo->napi);
2657 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2658 }
2659
2660 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2661 if (!status)
2662 be_link_status_update(adapter, link_status);
2663
2664 be_roce_dev_open(adapter);
2665 return 0;
2666 err:
2667 be_close(adapter->netdev);
2668 return -EIO;
2669 }
2670
2671 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2672 {
2673 struct be_dma_mem cmd;
2674 int status = 0;
2675 u8 mac[ETH_ALEN];
2676
2677 memset(mac, 0, ETH_ALEN);
2678
2679 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2680 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2681 GFP_KERNEL | __GFP_ZERO);
2682 if (cmd.va == NULL)
2683 return -1;
2684
2685 if (enable) {
2686 status = pci_write_config_dword(adapter->pdev,
2687 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2688 if (status) {
2689 dev_err(&adapter->pdev->dev,
2690 "Could not enable Wake-on-lan\n");
2691 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2692 cmd.dma);
2693 return status;
2694 }
2695 status = be_cmd_enable_magic_wol(adapter,
2696 adapter->netdev->dev_addr, &cmd);
2697 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2698 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2699 } else {
2700 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2701 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2702 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2703 }
2704
2705 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2706 return status;
2707 }
2708
2709 /*
2710 * Generate a seed MAC address from the PF MAC Address using jhash.
2711 * MAC addresses for VFs are assigned incrementally starting from the seed.
2712 * These addresses are programmed in the ASIC by the PF and the VF driver
2713 * queries for the MAC address during its probe.
2714 */
2715 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2716 {
2717 u32 vf;
2718 int status = 0;
2719 u8 mac[ETH_ALEN];
2720 struct be_vf_cfg *vf_cfg;
2721
2722 be_vf_eth_addr_generate(adapter, mac);
2723
2724 for_all_vfs(adapter, vf_cfg, vf) {
2725 if (lancer_chip(adapter)) {
2726 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2727 } else {
2728 status = be_cmd_pmac_add(adapter, mac,
2729 vf_cfg->if_handle,
2730 &vf_cfg->pmac_id, vf + 1);
2731 }
2732
2733 if (status)
2734 dev_err(&adapter->pdev->dev,
2735 "Mac address assignment failed for VF %d\n", vf);
2736 else
2737 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2738
2739 mac[5] += 1;
2740 }
2741 return status;
2742 }
2743
2744 static int be_vfs_mac_query(struct be_adapter *adapter)
2745 {
2746 int status, vf;
2747 u8 mac[ETH_ALEN];
2748 struct be_vf_cfg *vf_cfg;
2749 bool active;
2750
2751 for_all_vfs(adapter, vf_cfg, vf) {
2752 be_cmd_get_mac_from_list(adapter, mac, &active,
2753 &vf_cfg->pmac_id, 0);
2754
2755 status = be_cmd_mac_addr_query(adapter, mac, false,
2756 vf_cfg->if_handle, 0);
2757 if (status)
2758 return status;
2759 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2760 }
2761 return 0;
2762 }
2763
2764 static void be_vf_clear(struct be_adapter *adapter)
2765 {
2766 struct be_vf_cfg *vf_cfg;
2767 u32 vf;
2768
2769 if (be_find_vfs(adapter, ASSIGNED)) {
2770 dev_warn(&adapter->pdev->dev,
2771 "VFs are assigned to VMs: not disabling VFs\n");
2772 goto done;
2773 }
2774
2775 for_all_vfs(adapter, vf_cfg, vf) {
2776 if (lancer_chip(adapter))
2777 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2778 else
2779 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2780 vf_cfg->pmac_id, vf + 1);
2781
2782 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2783 }
2784 pci_disable_sriov(adapter->pdev);
2785 done:
2786 kfree(adapter->vf_cfg);
2787 adapter->num_vfs = 0;
2788 }
2789
2790 static int be_clear(struct be_adapter *adapter)
2791 {
2792 int i = 1;
2793
2794 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2795 cancel_delayed_work_sync(&adapter->work);
2796 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2797 }
2798
2799 if (sriov_enabled(adapter))
2800 be_vf_clear(adapter);
2801
2802 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2803 be_cmd_pmac_del(adapter, adapter->if_handle,
2804 adapter->pmac_id[i], 0);
2805
2806 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2807
2808 be_mcc_queues_destroy(adapter);
2809 be_rx_cqs_destroy(adapter);
2810 be_tx_queues_destroy(adapter);
2811 be_evt_queues_destroy(adapter);
2812
2813 kfree(adapter->pmac_id);
2814 adapter->pmac_id = NULL;
2815
2816 be_msix_disable(adapter);
2817 return 0;
2818 }
2819
2820 static int be_vfs_if_create(struct be_adapter *adapter)
2821 {
2822 struct be_vf_cfg *vf_cfg;
2823 u32 cap_flags, en_flags, vf;
2824 int status;
2825
2826 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2827 BE_IF_FLAGS_MULTICAST;
2828
2829 for_all_vfs(adapter, vf_cfg, vf) {
2830 if (!BE3_chip(adapter))
2831 be_cmd_get_profile_config(adapter, &cap_flags,
2832 NULL, vf + 1);
2833
2834 /* If a FW profile exists, then cap_flags are updated */
2835 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2836 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2837 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2838 &vf_cfg->if_handle, vf + 1);
2839 if (status)
2840 goto err;
2841 }
2842 err:
2843 return status;
2844 }
2845
2846 static int be_vf_setup_init(struct be_adapter *adapter)
2847 {
2848 struct be_vf_cfg *vf_cfg;
2849 int vf;
2850
2851 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2852 GFP_KERNEL);
2853 if (!adapter->vf_cfg)
2854 return -ENOMEM;
2855
2856 for_all_vfs(adapter, vf_cfg, vf) {
2857 vf_cfg->if_handle = -1;
2858 vf_cfg->pmac_id = -1;
2859 }
2860 return 0;
2861 }
2862
2863 static int be_vf_setup(struct be_adapter *adapter)
2864 {
2865 struct be_vf_cfg *vf_cfg;
2866 u16 def_vlan, lnk_speed;
2867 int status, old_vfs, vf;
2868 struct device *dev = &adapter->pdev->dev;
2869
2870 old_vfs = be_find_vfs(adapter, ENABLED);
2871 if (old_vfs) {
2872 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2873 if (old_vfs != num_vfs)
2874 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2875 adapter->num_vfs = old_vfs;
2876 } else {
2877 if (num_vfs > adapter->dev_num_vfs)
2878 dev_info(dev, "Device supports %d VFs and not %d\n",
2879 adapter->dev_num_vfs, num_vfs);
2880 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2881
2882 status = pci_enable_sriov(adapter->pdev, num_vfs);
2883 if (status) {
2884 dev_err(dev, "SRIOV enable failed\n");
2885 adapter->num_vfs = 0;
2886 return 0;
2887 }
2888 }
2889
2890 status = be_vf_setup_init(adapter);
2891 if (status)
2892 goto err;
2893
2894 if (old_vfs) {
2895 for_all_vfs(adapter, vf_cfg, vf) {
2896 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2897 if (status)
2898 goto err;
2899 }
2900 } else {
2901 status = be_vfs_if_create(adapter);
2902 if (status)
2903 goto err;
2904 }
2905
2906 if (old_vfs) {
2907 status = be_vfs_mac_query(adapter);
2908 if (status)
2909 goto err;
2910 } else {
2911 status = be_vf_eth_addr_config(adapter);
2912 if (status)
2913 goto err;
2914 }
2915
2916 for_all_vfs(adapter, vf_cfg, vf) {
2917 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2918 * Allow full available bandwidth
2919 */
2920 if (BE3_chip(adapter) && !old_vfs)
2921 be_cmd_set_qos(adapter, 1000, vf+1);
2922
2923 status = be_cmd_link_status_query(adapter, &lnk_speed,
2924 NULL, vf + 1);
2925 if (!status)
2926 vf_cfg->tx_rate = lnk_speed;
2927
2928 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2929 vf + 1, vf_cfg->if_handle);
2930 if (status)
2931 goto err;
2932 vf_cfg->def_vid = def_vlan;
2933
2934 be_cmd_enable_vf(adapter, vf + 1);
2935 }
2936 return 0;
2937 err:
2938 dev_err(dev, "VF setup failed\n");
2939 be_vf_clear(adapter);
2940 return status;
2941 }
2942
2943 static void be_setup_init(struct be_adapter *adapter)
2944 {
2945 adapter->vlan_prio_bmap = 0xff;
2946 adapter->phy.link_speed = -1;
2947 adapter->if_handle = -1;
2948 adapter->be3_native = false;
2949 adapter->promiscuous = false;
2950 if (be_physfn(adapter))
2951 adapter->cmd_privileges = MAX_PRIVILEGES;
2952 else
2953 adapter->cmd_privileges = MIN_PRIVILEGES;
2954 }
2955
2956 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2957 bool *active_mac, u32 *pmac_id)
2958 {
2959 int status = 0;
2960
2961 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2962 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2963 if (!lancer_chip(adapter) && !be_physfn(adapter))
2964 *active_mac = true;
2965 else
2966 *active_mac = false;
2967
2968 return status;
2969 }
2970
2971 if (lancer_chip(adapter)) {
2972 status = be_cmd_get_mac_from_list(adapter, mac,
2973 active_mac, pmac_id, 0);
2974 if (*active_mac) {
2975 status = be_cmd_mac_addr_query(adapter, mac, false,
2976 if_handle, *pmac_id);
2977 }
2978 } else if (be_physfn(adapter)) {
2979 /* For BE3, for PF get permanent MAC */
2980 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2981 *active_mac = false;
2982 } else {
2983 /* For BE3, for VF get soft MAC assigned by PF*/
2984 status = be_cmd_mac_addr_query(adapter, mac, false,
2985 if_handle, 0);
2986 *active_mac = true;
2987 }
2988 return status;
2989 }
2990
2991 static void be_get_resources(struct be_adapter *adapter)
2992 {
2993 u16 dev_num_vfs;
2994 int pos, status;
2995 bool profile_present = false;
2996 u16 txq_count = 0;
2997
2998 if (!BEx_chip(adapter)) {
2999 status = be_cmd_get_func_config(adapter);
3000 if (!status)
3001 profile_present = true;
3002 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3003 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3004 }
3005
3006 if (profile_present) {
3007 /* Sanity fixes for Lancer */
3008 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3009 BE_UC_PMAC_COUNT);
3010 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3011 BE_NUM_VLANS_SUPPORTED);
3012 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3013 BE_MAX_MC);
3014 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3015 MAX_TX_QS);
3016 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3017 BE3_MAX_RSS_QS);
3018 adapter->max_event_queues = min_t(u16,
3019 adapter->max_event_queues,
3020 BE3_MAX_RSS_QS);
3021
3022 if (adapter->max_rss_queues &&
3023 adapter->max_rss_queues == adapter->max_rx_queues)
3024 adapter->max_rss_queues -= 1;
3025
3026 if (adapter->max_event_queues < adapter->max_rss_queues)
3027 adapter->max_rss_queues = adapter->max_event_queues;
3028
3029 } else {
3030 if (be_physfn(adapter))
3031 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3032 else
3033 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3034
3035 if (adapter->function_mode & FLEX10_MODE)
3036 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3037 else
3038 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3039
3040 adapter->max_mcast_mac = BE_MAX_MC;
3041 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3042 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3043 MAX_TX_QS);
3044 adapter->max_rss_queues = (adapter->be3_native) ?
3045 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3046 adapter->max_event_queues = BE3_MAX_RSS_QS;
3047
3048 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3049 BE_IF_FLAGS_BROADCAST |
3050 BE_IF_FLAGS_MULTICAST |
3051 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3052 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3053 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3054 BE_IF_FLAGS_PROMISCUOUS;
3055
3056 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3057 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3058 }
3059
3060 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3061 if (pos) {
3062 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3063 &dev_num_vfs);
3064 if (BE3_chip(adapter))
3065 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3066 adapter->dev_num_vfs = dev_num_vfs;
3067 }
3068 }
3069
3070 /* Routine to query per function resource limits */
3071 static int be_get_config(struct be_adapter *adapter)
3072 {
3073 int status;
3074
3075 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3076 &adapter->function_mode,
3077 &adapter->function_caps,
3078 &adapter->asic_rev);
3079 if (status)
3080 goto err;
3081
3082 be_get_resources(adapter);
3083
3084 /* primary mac needs 1 pmac entry */
3085 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3086 sizeof(u32), GFP_KERNEL);
3087 if (!adapter->pmac_id) {
3088 status = -ENOMEM;
3089 goto err;
3090 }
3091
3092 err:
3093 return status;
3094 }
3095
3096 static int be_setup(struct be_adapter *adapter)
3097 {
3098 struct device *dev = &adapter->pdev->dev;
3099 u32 en_flags;
3100 u32 tx_fc, rx_fc;
3101 int status;
3102 u8 mac[ETH_ALEN];
3103 bool active_mac;
3104
3105 be_setup_init(adapter);
3106
3107 if (!lancer_chip(adapter))
3108 be_cmd_req_native_mode(adapter);
3109
3110 status = be_get_config(adapter);
3111 if (status)
3112 goto err;
3113
3114 be_msix_enable(adapter);
3115
3116 status = be_evt_queues_create(adapter);
3117 if (status)
3118 goto err;
3119
3120 status = be_tx_cqs_create(adapter);
3121 if (status)
3122 goto err;
3123
3124 status = be_rx_cqs_create(adapter);
3125 if (status)
3126 goto err;
3127
3128 status = be_mcc_queues_create(adapter);
3129 if (status)
3130 goto err;
3131
3132 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3133 /* In UMC mode the FW does not return the right privileges.
3134 * Override with privileges equivalent to those of the PF.
3135 */
3136 if (be_is_mc(adapter))
3137 adapter->cmd_privileges = MAX_PRIVILEGES;
3138
3139 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3140 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3141
3142 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3143 en_flags |= BE_IF_FLAGS_RSS;
3144
3145 en_flags = en_flags & adapter->if_cap_flags;
3146
3147 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3148 &adapter->if_handle, 0);
3149 if (status != 0)
3150 goto err;
3151
3152 memset(mac, 0, ETH_ALEN);
3153 active_mac = false;
3154 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3155 &active_mac, &adapter->pmac_id[0]);
3156 if (status != 0)
3157 goto err;
3158
3159 if (!active_mac) {
3160 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3161 &adapter->pmac_id[0], 0);
3162 if (status != 0)
3163 goto err;
3164 }
3165
3166 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3167 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3168 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3169 }
3170
3171 status = be_tx_qs_create(adapter);
3172 if (status)
3173 goto err;
3174
3175 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3176
3177 if (adapter->vlans_added)
3178 be_vid_config(adapter);
3179
3180 be_set_rx_mode(adapter->netdev);
3181
3182 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3183
3184 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3185 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3186 adapter->rx_fc);
3187
3188 if (be_physfn(adapter) && num_vfs) {
3189 if (adapter->dev_num_vfs)
3190 be_vf_setup(adapter);
3191 else
3192 dev_warn(dev, "device doesn't support SRIOV\n");
3193 }
3194
3195 status = be_cmd_get_phy_info(adapter);
3196 if (!status && be_pause_supported(adapter))
3197 adapter->phy.fc_autoneg = 1;
3198
3199 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3200 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3201 return 0;
3202 err:
3203 be_clear(adapter);
3204 return status;
3205 }
3206
3207 #ifdef CONFIG_NET_POLL_CONTROLLER
3208 static void be_netpoll(struct net_device *netdev)
3209 {
3210 struct be_adapter *adapter = netdev_priv(netdev);
3211 struct be_eq_obj *eqo;
3212 int i;
3213
3214 for_all_evt_queues(adapter, eqo, i) {
3215 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3216 napi_schedule(&eqo->napi);
3217 }
3218
3219 return;
3220 }
3221 #endif
3222
3223 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3224 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3225
3226 static bool be_flash_redboot(struct be_adapter *adapter,
3227 const u8 *p, u32 img_start, int image_size,
3228 int hdr_size)
3229 {
3230 u32 crc_offset;
3231 u8 flashed_crc[4];
3232 int status;
3233
3234 crc_offset = hdr_size + img_start + image_size - 4;
3235
3236 p += crc_offset;
3237
3238 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3239 (image_size - 4));
3240 if (status) {
3241 dev_err(&adapter->pdev->dev,
3242 "could not get crc from flash, not flashing redboot\n");
3243 return false;
3244 }
3245
3246 /* update redboot only if crc does not match */
3247 if (!memcmp(flashed_crc, p, 4))
3248 return false;
3249 else
3250 return true;
3251 }
3252
3253 static bool phy_flashing_required(struct be_adapter *adapter)
3254 {
3255 return (adapter->phy.phy_type == TN_8022 &&
3256 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3257 }
3258
3259 static bool is_comp_in_ufi(struct be_adapter *adapter,
3260 struct flash_section_info *fsec, int type)
3261 {
3262 int i = 0, img_type = 0;
3263 struct flash_section_info_g2 *fsec_g2 = NULL;
3264
3265 if (BE2_chip(adapter))
3266 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3267
3268 for (i = 0; i < MAX_FLASH_COMP; i++) {
3269 if (fsec_g2)
3270 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3271 else
3272 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3273
3274 if (img_type == type)
3275 return true;
3276 }
3277 return false;
3278
3279 }
3280
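/* Scan the UFI image past the file header in 32-byte steps looking for
 * the flash directory cookie; returns the matching section info or NULL.
 */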
3281 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3282 int header_size,
3283 const struct firmware *fw)
3284 {
3285 struct flash_section_info *fsec = NULL;
3286 const u8 *p = fw->data;
3287
3288 p += header_size;
3289 while (p < (fw->data + fw->size)) {
3290 fsec = (struct flash_section_info *)p;
3291 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3292 return fsec;
3293 p += 32;
3294 }
3295 return NULL;
3296 }
3297
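/* Write an image to the flash ROM in 32KB chunks. Intermediate chunks
 * use a SAVE operation; the final chunk issues the FLASH (or PHY_FLASH)
 * operation that actually commits the image.
 */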
3298 static int be_flash(struct be_adapter *adapter, const u8 *img,
3299 struct be_dma_mem *flash_cmd, int optype, int img_size)
3300 {
3301 u32 total_bytes = 0, flash_op, num_bytes = 0;
3302 int status = 0;
3303 struct be_cmd_write_flashrom *req = flash_cmd->va;
3304
3305 total_bytes = img_size;
3306 while (total_bytes) {
3307 num_bytes = min_t(u32, 32*1024, total_bytes);
3308
3309 total_bytes -= num_bytes;
3310
3311 if (!total_bytes) {
3312 if (optype == OPTYPE_PHY_FW)
3313 flash_op = FLASHROM_OPER_PHY_FLASH;
3314 else
3315 flash_op = FLASHROM_OPER_FLASH;
3316 } else {
3317 if (optype == OPTYPE_PHY_FW)
3318 flash_op = FLASHROM_OPER_PHY_SAVE;
3319 else
3320 flash_op = FLASHROM_OPER_SAVE;
3321 }
3322
3323 memcpy(req->data_buf, img, num_bytes);
3324 img += num_bytes;
3325 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3326 flash_op, num_bytes);
3327 if (status) {
3328 if (status == ILLEGAL_IOCTL_REQ &&
3329 optype == OPTYPE_PHY_FW)
3330 break;
3331 dev_err(&adapter->pdev->dev,
3332 "cmd to write to flash rom failed.\n");
3333 return status;
3334 }
3335 }
3336 return 0;
3337 }
3338
3339 /* For BE2, BE3 and BE3-R */
3340 static int be_flash_BEx(struct be_adapter *adapter,
3341 const struct firmware *fw,
3342 struct be_dma_mem *flash_cmd,
3343 int num_of_images)
3344
3345 {
3346 int status = 0, i, filehdr_size = 0;
3347 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3348 const u8 *p = fw->data;
3349 const struct flash_comp *pflashcomp;
3350 int num_comp, redboot;
3351 struct flash_section_info *fsec = NULL;
3352
3353 struct flash_comp gen3_flash_types[] = {
3354 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3355 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3356 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3357 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3358 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3359 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3360 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3361 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3362 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3363 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3364 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3365 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3366 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3367 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3368 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3369 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3370 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3371 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3372 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3373 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3374 };
3375
3376 struct flash_comp gen2_flash_types[] = {
3377 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3378 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3379 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3380 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3381 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3382 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3383 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3384 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3385 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3386 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3387 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3388 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3389 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3390 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3391 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3392 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3393 };
3394
3395 if (BE3_chip(adapter)) {
3396 pflashcomp = gen3_flash_types;
3397 filehdr_size = sizeof(struct flash_file_hdr_g3);
3398 num_comp = ARRAY_SIZE(gen3_flash_types);
3399 } else {
3400 pflashcomp = gen2_flash_types;
3401 filehdr_size = sizeof(struct flash_file_hdr_g2);
3402 num_comp = ARRAY_SIZE(gen2_flash_types);
3403 }
3404
3405 /* Get flash section info */
3406 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3407 if (!fsec) {
3408 dev_err(&adapter->pdev->dev,
3409 "Invalid Cookie. UFI corrupted ?\n");
3410 return -1;
3411 }
3412 for (i = 0; i < num_comp; i++) {
3413 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3414 continue;
3415
3416 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3417 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3418 continue;
3419
3420 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3421 !phy_flashing_required(adapter))
3422 continue;
3423
3424 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3425 redboot = be_flash_redboot(adapter, fw->data,
3426 pflashcomp[i].offset, pflashcomp[i].size,
3427 filehdr_size + img_hdrs_size);
3428 if (!redboot)
3429 continue;
3430 }
3431
3432 p = fw->data;
3433 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3434 if (p + pflashcomp[i].size > fw->data + fw->size)
3435 return -1;
3436
3437 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3438 pflashcomp[i].size);
3439 if (status) {
3440 dev_err(&adapter->pdev->dev,
3441 "Flashing section type %d failed.\n",
3442 pflashcomp[i].img_type);
3443 return status;
3444 }
3445 }
3446 return 0;
3447 }
3448
3449 static int be_flash_skyhawk(struct be_adapter *adapter,
3450 const struct firmware *fw,
3451 struct be_dma_mem *flash_cmd, int num_of_images)
3452 {
3453 int status = 0, i, filehdr_size = 0;
3454 int img_offset, img_size, img_optype, redboot;
3455 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3456 const u8 *p = fw->data;
3457 struct flash_section_info *fsec = NULL;
3458
3459 filehdr_size = sizeof(struct flash_file_hdr_g3);
3460 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3461 if (!fsec) {
3462 dev_err(&adapter->pdev->dev,
3463 "Invalid Cookie. UFI corrupted ?\n");
3464 return -1;
3465 }
3466
3467 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3468 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3469 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3470
3471 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3472 case IMAGE_FIRMWARE_iSCSI:
3473 img_optype = OPTYPE_ISCSI_ACTIVE;
3474 break;
3475 case IMAGE_BOOT_CODE:
3476 img_optype = OPTYPE_REDBOOT;
3477 break;
3478 case IMAGE_OPTION_ROM_ISCSI:
3479 img_optype = OPTYPE_BIOS;
3480 break;
3481 case IMAGE_OPTION_ROM_PXE:
3482 img_optype = OPTYPE_PXE_BIOS;
3483 break;
3484 case IMAGE_OPTION_ROM_FCoE:
3485 img_optype = OPTYPE_FCOE_BIOS;
3486 break;
3487 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3488 img_optype = OPTYPE_ISCSI_BACKUP;
3489 break;
3490 case IMAGE_NCSI:
3491 img_optype = OPTYPE_NCSI_FW;
3492 break;
3493 default:
3494 continue;
3495 }
3496
3497 if (img_optype == OPTYPE_REDBOOT) {
3498 redboot = be_flash_redboot(adapter, fw->data,
3499 img_offset, img_size,
3500 filehdr_size + img_hdrs_size);
3501 if (!redboot)
3502 continue;
3503 }
3504
3505 p = fw->data;
3506 p += filehdr_size + img_offset + img_hdrs_size;
3507 if (p + img_size > fw->data + fw->size)
3508 return -1;
3509
3510 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3511 if (status) {
3512 dev_err(&adapter->pdev->dev,
3513 "Flashing section type %d failed.\n",
3514 fsec->fsec_entry[i].type);
3515 return status;
3516 }
3517 }
3518 return 0;
3519 }
3520
3521 static int lancer_wait_idle(struct be_adapter *adapter)
3522 {
3523 #define SLIPORT_IDLE_TIMEOUT 30
3524 u32 reg_val;
3525 int status = 0, i;
3526
3527 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3528 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3529 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3530 break;
3531
3532 ssleep(1);
3533 }
3534
3535 if (i == SLIPORT_IDLE_TIMEOUT)
3536 status = -1;
3537
3538 return status;
3539 }
3540
3541 static int lancer_fw_reset(struct be_adapter *adapter)
3542 {
3543 int status = 0;
3544
3545 status = lancer_wait_idle(adapter);
3546 if (status)
3547 return status;
3548
3549 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3550 PHYSDEV_CONTROL_OFFSET);
3551
3552 return status;
3553 }
3554
3555 static int lancer_fw_download(struct be_adapter *adapter,
3556 const struct firmware *fw)
3557 {
3558 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3559 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3560 struct be_dma_mem flash_cmd;
3561 const u8 *data_ptr = NULL;
3562 u8 *dest_image_ptr = NULL;
3563 size_t image_size = 0;
3564 u32 chunk_size = 0;
3565 u32 data_written = 0;
3566 u32 offset = 0;
3567 int status = 0;
3568 u8 add_status = 0;
3569 u8 change_status;
3570
3571 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3572 dev_err(&adapter->pdev->dev,
3573 "FW Image not properly aligned. "
3574 "Length must be 4 byte aligned.\n");
3575 status = -EINVAL;
3576 goto lancer_fw_exit;
3577 }
3578
3579 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3580 + LANCER_FW_DOWNLOAD_CHUNK;
3581 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3582 &flash_cmd.dma, GFP_KERNEL);
3583 if (!flash_cmd.va) {
3584 status = -ENOMEM;
3585 goto lancer_fw_exit;
3586 }
3587
3588 dest_image_ptr = flash_cmd.va +
3589 sizeof(struct lancer_cmd_req_write_object);
3590 image_size = fw->size;
3591 data_ptr = fw->data;
3592
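/* Stream the firmware to the /prg object in 32KB chunks; the adapter
 * reports how much it consumed in each write. A final zero-length write
 * (below) commits the downloaded image.
 */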
3593 while (image_size) {
3594 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3595
3596 /* Copy the image chunk content. */
3597 memcpy(dest_image_ptr, data_ptr, chunk_size);
3598
3599 status = lancer_cmd_write_object(adapter, &flash_cmd,
3600 chunk_size, offset,
3601 LANCER_FW_DOWNLOAD_LOCATION,
3602 &data_written, &change_status,
3603 &add_status);
3604 if (status)
3605 break;
3606
3607 offset += data_written;
3608 data_ptr += data_written;
3609 image_size -= data_written;
3610 }
3611
3612 if (!status) {
3613 /* Commit the FW written */
3614 status = lancer_cmd_write_object(adapter, &flash_cmd,
3615 0, offset,
3616 LANCER_FW_DOWNLOAD_LOCATION,
3617 &data_written, &change_status,
3618 &add_status);
3619 }
3620
3621 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3622 flash_cmd.dma);
3623 if (status) {
3624 dev_err(&adapter->pdev->dev,
3625 "Firmware load error. "
3626 "Status code: 0x%x Additional Status: 0x%x\n",
3627 status, add_status);
3628 goto lancer_fw_exit;
3629 }
3630
3631 if (change_status == LANCER_FW_RESET_NEEDED) {
3632 status = lancer_fw_reset(adapter);
3633 if (status) {
3634 dev_err(&adapter->pdev->dev,
3635 "Adapter busy for FW reset.\n"
3636 "New FW will not be active.\n");
3637 goto lancer_fw_exit;
3638 }
3639 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3640 dev_err(&adapter->pdev->dev,
3641 "System reboot required for new FW"
3642 " to be active\n");
3643 }
3644
3645 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3646 lancer_fw_exit:
3647 return status;
3648 }
3649
3650 #define UFI_TYPE2 2
3651 #define UFI_TYPE3 3
3652 #define UFI_TYPE3R 10
3653 #define UFI_TYPE4 4
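/* Determine the UFI flavour from the first character of the build string
 * in the file header ('2' = BE2, '3' = BE3/BE3-R, '4' = Skyhawk); for
 * BE3, asic_type_rev 0x10 identifies a BE3-R image.
 */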
3654 static int be_get_ufi_type(struct be_adapter *adapter,
3655 struct flash_file_hdr_g3 *fhdr)
3656 {
3657 if (fhdr == NULL)
3658 goto be_get_ufi_exit;
3659
3660 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3661 return UFI_TYPE4;
3662 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3663 if (fhdr->asic_type_rev == 0x10)
3664 return UFI_TYPE3R;
3665 else
3666 return UFI_TYPE3;
3667 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3668 return UFI_TYPE2;
3669
3670 be_get_ufi_exit:
3671 dev_err(&adapter->pdev->dev,
3672 "UFI and Interface are not compatible for flashing\n");
3673 return -1;
3674 }
3675
3676 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3677 {
3678 struct flash_file_hdr_g3 *fhdr3;
3679 struct image_hdr *img_hdr_ptr = NULL;
3680 struct be_dma_mem flash_cmd;
3681 const u8 *p;
3682 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3683
3684 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3685 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3686 &flash_cmd.dma, GFP_KERNEL);
3687 if (!flash_cmd.va) {
3688 status = -ENOMEM;
3689 goto be_fw_exit;
3690 }
3691
3692 p = fw->data;
3693 fhdr3 = (struct flash_file_hdr_g3 *)p;
3694
3695 ufi_type = be_get_ufi_type(adapter, fhdr3);
3696
3697 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3698 for (i = 0; i < num_imgs; i++) {
3699 img_hdr_ptr = (struct image_hdr *)(fw->data +
3700 (sizeof(struct flash_file_hdr_g3) +
3701 i * sizeof(struct image_hdr)));
3702 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3703 switch (ufi_type) {
3704 case UFI_TYPE4:
3705 status = be_flash_skyhawk(adapter, fw,
3706 &flash_cmd, num_imgs);
3707 break;
3708 case UFI_TYPE3R:
3709 status = be_flash_BEx(adapter, fw, &flash_cmd,
3710 num_imgs);
3711 break;
3712 case UFI_TYPE3:
3713 /* Do not flash this ufi on BE3-R cards */
3714 if (adapter->asic_rev < 0x10)
3715 status = be_flash_BEx(adapter, fw,
3716 &flash_cmd,
3717 num_imgs);
3718 else {
3719 status = -1;
3720 dev_err(&adapter->pdev->dev,
3721 "Can't load BE3 UFI on BE3R\n");
3722 }
3723 }
3724 }
3725 }
3726
3727 if (ufi_type == UFI_TYPE2)
3728 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3729 else if (ufi_type == -1)
3730 status = -1;
3731
3732 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3733 flash_cmd.dma);
3734 if (status) {
3735 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3736 goto be_fw_exit;
3737 }
3738
3739 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3740
3741 be_fw_exit:
3742 return status;
3743 }
3744
3745 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3746 {
3747 const struct firmware *fw;
3748 int status;
3749
3750 if (!netif_running(adapter->netdev)) {
3751 dev_err(&adapter->pdev->dev,
3752 "Firmware load not allowed (interface is down)\n");
3753 return -1;
3754 }
3755
3756 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3757 if (status)
3758 goto fw_exit;
3759
3760 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3761
3762 if (lancer_chip(adapter))
3763 status = lancer_fw_download(adapter, fw);
3764 else
3765 status = be_fw_download(adapter, fw);
3766
3767 fw_exit:
3768 release_firmware(fw);
3769 return status;
3770 }
3771
3772 static const struct net_device_ops be_netdev_ops = {
3773 .ndo_open = be_open,
3774 .ndo_stop = be_close,
3775 .ndo_start_xmit = be_xmit,
3776 .ndo_set_rx_mode = be_set_rx_mode,
3777 .ndo_set_mac_address = be_mac_addr_set,
3778 .ndo_change_mtu = be_change_mtu,
3779 .ndo_get_stats64 = be_get_stats64,
3780 .ndo_validate_addr = eth_validate_addr,
3781 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3782 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3783 .ndo_set_vf_mac = be_set_vf_mac,
3784 .ndo_set_vf_vlan = be_set_vf_vlan,
3785 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3786 .ndo_get_vf_config = be_get_vf_config,
3787 #ifdef CONFIG_NET_POLL_CONTROLLER
3788 .ndo_poll_controller = be_netpoll,
3789 #endif
3790 };
3791
3792 static void be_netdev_init(struct net_device *netdev)
3793 {
3794 struct be_adapter *adapter = netdev_priv(netdev);
3795 struct be_eq_obj *eqo;
3796 int i;
3797
3798 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3799 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3800 NETIF_F_HW_VLAN_CTAG_TX;
3801 if (be_multi_rxq(adapter))
3802 netdev->hw_features |= NETIF_F_RXHASH;
3803
3804 netdev->features |= netdev->hw_features |
3805 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3806
3807 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3808 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3809
3810 netdev->priv_flags |= IFF_UNICAST_FLT;
3811
3812 netdev->flags |= IFF_MULTICAST;
3813
3814 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3815
3816 netdev->netdev_ops = &be_netdev_ops;
3817
3818 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3819
3820 for_all_evt_queues(adapter, eqo, i)
3821 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3822 }
3823
3824 static void be_unmap_pci_bars(struct be_adapter *adapter)
3825 {
3826 if (adapter->csr)
3827 pci_iounmap(adapter->pdev, adapter->csr);
3828 if (adapter->db)
3829 pci_iounmap(adapter->pdev, adapter->db);
3830 }
3831
3832 static int db_bar(struct be_adapter *adapter)
3833 {
3834 if (lancer_chip(adapter) || !be_physfn(adapter))
3835 return 0;
3836 else
3837 return 4;
3838 }
3839
3840 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3841 {
3842 if (skyhawk_chip(adapter)) {
3843 adapter->roce_db.size = 4096;
3844 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3845 db_bar(adapter));
3846 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3847 db_bar(adapter));
3848 }
3849 return 0;
3850 }
3851
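/* Map the CSR BAR (BEx PFs only) and the door-bell BAR */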
3852 static int be_map_pci_bars(struct be_adapter *adapter)
3853 {
3854 u8 __iomem *addr;
3855 u32 sli_intf;
3856
3857 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3858 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3859 SLI_INTF_IF_TYPE_SHIFT;
3860
3861 if (BEx_chip(adapter) && be_physfn(adapter)) {
3862 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3863 if (adapter->csr == NULL)
3864 return -ENOMEM;
3865 }
3866
3867 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3868 if (addr == NULL)
3869 goto pci_map_err;
3870 adapter->db = addr;
3871
3872 be_roce_map_pci_bars(adapter);
3873 return 0;
3874
3875 pci_map_err:
3876 be_unmap_pci_bars(adapter);
3877 return -ENOMEM;
3878 }
3879
3880 static void be_ctrl_cleanup(struct be_adapter *adapter)
3881 {
3882 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3883
3884 be_unmap_pci_bars(adapter);
3885
3886 if (mem->va)
3887 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3888 mem->dma);
3889
3890 mem = &adapter->rx_filter;
3891 if (mem->va)
3892 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3893 mem->dma);
3894 }
3895
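/* Allocate the 16-byte-aligned mailbox and the RX-filter command DMA
 * buffers and initialize the mailbox/MCC locks
 */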
3896 static int be_ctrl_init(struct be_adapter *adapter)
3897 {
3898 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3899 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3900 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3901 u32 sli_intf;
3902 int status;
3903
3904 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3905 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3906 SLI_INTF_FAMILY_SHIFT;
3907 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3908
3909 status = be_map_pci_bars(adapter);
3910 if (status)
3911 goto done;
3912
3913 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3914 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3915 mbox_mem_alloc->size,
3916 &mbox_mem_alloc->dma,
3917 GFP_KERNEL);
3918 if (!mbox_mem_alloc->va) {
3919 status = -ENOMEM;
3920 goto unmap_pci_bars;
3921 }
3922 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3923 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3924 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3925 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3926
3927 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3928 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3929 &rx_filter->dma,
3930 GFP_KERNEL | __GFP_ZERO);
3931 if (rx_filter->va == NULL) {
3932 status = -ENOMEM;
3933 goto free_mbox;
3934 }
3935
3936 mutex_init(&adapter->mbox_lock);
3937 spin_lock_init(&adapter->mcc_lock);
3938 spin_lock_init(&adapter->mcc_cq_lock);
3939
3940 init_completion(&adapter->flash_compl);
3941 pci_save_state(adapter->pdev);
3942 return 0;
3943
3944 free_mbox:
3945 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3946 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3947
3948 unmap_pci_bars:
3949 be_unmap_pci_bars(adapter);
3950
3951 done:
3952 return status;
3953 }
3954
3955 static void be_stats_cleanup(struct be_adapter *adapter)
3956 {
3957 struct be_dma_mem *cmd = &adapter->stats_cmd;
3958
3959 if (cmd->va)
3960 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3961 cmd->va, cmd->dma);
3962 }
3963
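/* Allocate the stats command DMA buffer; its size depends on the stats
 * command version supported by the chip
 */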
3964 static int be_stats_init(struct be_adapter *adapter)
3965 {
3966 struct be_dma_mem *cmd = &adapter->stats_cmd;
3967
3968 if (lancer_chip(adapter))
3969 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3970 else if (BE2_chip(adapter))
3971 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3972 else
3973 /* BE3 and Skyhawk */
3974 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3975
3976 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3977 GFP_KERNEL | __GFP_ZERO);
3978 if (cmd->va == NULL)
3979 return -1;
3980 return 0;
3981 }
3982
3983 static void be_remove(struct pci_dev *pdev)
3984 {
3985 struct be_adapter *adapter = pci_get_drvdata(pdev);
3986
3987 if (!adapter)
3988 return;
3989
3990 be_roce_dev_remove(adapter);
3991 be_intr_set(adapter, false);
3992
3993 cancel_delayed_work_sync(&adapter->func_recovery_work);
3994
3995 unregister_netdev(adapter->netdev);
3996
3997 be_clear(adapter);
3998
3999 /* tell fw we're done with firing cmds */
4000 be_cmd_fw_clean(adapter);
4001
4002 be_stats_cleanup(adapter);
4003
4004 be_ctrl_cleanup(adapter);
4005
4006 pci_disable_pcie_error_reporting(pdev);
4007
4008 pci_set_drvdata(pdev, NULL);
4009 pci_release_regions(pdev);
4010 pci_disable_device(pdev);
4011
4012 free_netdev(adapter->netdev);
4013 }
4014
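/* WoL is supported only if FW reports the capability and the adapter
 * is not in the WoL exclusion list
 */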
4015 bool be_is_wol_supported(struct be_adapter *adapter)
4016 {
4017 	return (adapter->wol_cap & BE_WOL_CAP) &&
4018 		!be_is_wol_excluded(adapter);
4019 }
4020
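/* Read the FW's UART trace level from the extended FAT capabilities;
 * not applicable on Lancer
 */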
4021 u32 be_get_fw_log_level(struct be_adapter *adapter)
4022 {
4023 struct be_dma_mem extfat_cmd;
4024 struct be_fat_conf_params *cfgs;
4025 int status;
4026 u32 level = 0;
4027 int j;
4028
4029 if (lancer_chip(adapter))
4030 return 0;
4031
4032 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4033 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4034 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4035 &extfat_cmd.dma);
4036
4037 if (!extfat_cmd.va) {
4038 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4039 __func__);
4040 goto err;
4041 }
4042
4043 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4044 if (!status) {
4045 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4046 sizeof(struct be_cmd_resp_hdr));
4047 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4048 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4049 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4050 }
4051 }
4052 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4053 extfat_cmd.dma);
4054 err:
4055 return level;
4056 }
4057
4058 static int be_get_initial_config(struct be_adapter *adapter)
4059 {
4060 int status;
4061 u32 level;
4062
4063 status = be_cmd_get_cntl_attributes(adapter);
4064 if (status)
4065 return status;
4066
4067 status = be_cmd_get_acpi_wol_cap(adapter);
4068 if (status) {
4069 		/* in case of a failure to get WoL capabilities
4070 		 * check the exclusion list to determine WoL capability */
4071 if (!be_is_wol_excluded(adapter))
4072 adapter->wol_cap |= BE_WOL_CAP;
4073 }
4074
4075 if (be_is_wol_supported(adapter))
4076 adapter->wol = true;
4077
4078 /* Must be a power of 2 or else MODULO will BUG_ON */
4079 adapter->be_get_temp_freq = 64;
4080
4081 level = be_get_fw_log_level(adapter);
4082 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4083
4084 return 0;
4085 }
4086
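/* Once the Lancer SLIPORT reports ready again, tear down and re-create
 * the function's resources
 */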
4087 static int lancer_recover_func(struct be_adapter *adapter)
4088 {
4089 int status;
4090
4091 status = lancer_test_and_set_rdy_state(adapter);
4092 if (status)
4093 goto err;
4094
4095 if (netif_running(adapter->netdev))
4096 be_close(adapter->netdev);
4097
4098 be_clear(adapter);
4099
4100 adapter->hw_error = false;
4101 adapter->fw_timeout = false;
4102
4103 status = be_setup(adapter);
4104 if (status)
4105 goto err;
4106
4107 if (netif_running(adapter->netdev)) {
4108 status = be_open(adapter->netdev);
4109 if (status)
4110 goto err;
4111 }
4112
4113 	dev_info(&adapter->pdev->dev,
4114 		 "Adapter SLIPORT recovery succeeded\n");
4115 return 0;
4116 err:
4117 if (adapter->eeh_error)
4118 dev_err(&adapter->pdev->dev,
4119 "Adapter SLIPORT recovery failed\n");
4120
4121 return status;
4122 }
4123
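/* Runs every second: detect adapter errors and, on Lancer, attempt recovery */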
4124 static void be_func_recovery_task(struct work_struct *work)
4125 {
4126 struct be_adapter *adapter =
4127 container_of(work, struct be_adapter, func_recovery_work.work);
4128 int status;
4129
4130 be_detect_error(adapter);
4131
4132 if (adapter->hw_error && lancer_chip(adapter)) {
4133
4134 if (adapter->eeh_error)
4135 goto out;
4136
4137 rtnl_lock();
4138 netif_device_detach(adapter->netdev);
4139 rtnl_unlock();
4140
4141 status = lancer_recover_func(adapter);
4142
4143 if (!status)
4144 netif_device_attach(adapter->netdev);
4145 }
4146
4147 out:
4148 schedule_delayed_work(&adapter->func_recovery_work,
4149 msecs_to_jiffies(1000));
4150 }
4151
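/* Periodic (1 second) housekeeping: reap MCC completions when the
 * interface is down, refresh stats, read the die temperature,
 * replenish starved RX queues and adapt EQ interrupt delays
 */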
4152 static void be_worker(struct work_struct *work)
4153 {
4154 struct be_adapter *adapter =
4155 container_of(work, struct be_adapter, work.work);
4156 struct be_rx_obj *rxo;
4157 struct be_eq_obj *eqo;
4158 int i;
4159
4160 /* when interrupts are not yet enabled, just reap any pending
4161 * mcc completions */
4162 if (!netif_running(adapter->netdev)) {
4163 local_bh_disable();
4164 be_process_mcc(adapter);
4165 local_bh_enable();
4166 goto reschedule;
4167 }
4168
4169 if (!adapter->stats_cmd_sent) {
4170 if (lancer_chip(adapter))
4171 lancer_cmd_get_pport_stats(adapter,
4172 &adapter->stats_cmd);
4173 else
4174 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4175 }
4176
4177 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4178 be_cmd_get_die_temperature(adapter);
4179
4180 for_all_rx_queues(adapter, rxo, i) {
4181 if (rxo->rx_post_starved) {
4182 rxo->rx_post_starved = false;
4183 be_post_rx_frags(rxo, GFP_KERNEL);
4184 }
4185 }
4186
4187 for_all_evt_queues(adapter, eqo, i)
4188 be_eqd_update(adapter, eqo);
4189
4190 reschedule:
4191 adapter->work_counter++;
4192 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4193 }
4194
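/* A function reset at probe is skipped if VFs are already enabled */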
4195 static bool be_reset_required(struct be_adapter *adapter)
4196 {
4197 	return be_find_vfs(adapter, ENABLED) <= 0;
4198 }
4199
4200 static char *mc_name(struct be_adapter *adapter)
4201 {
4202 if (adapter->function_mode & FLEX10_MODE)
4203 return "FLEX10";
4204 else if (adapter->function_mode & VNIC_MODE)
4205 return "vNIC";
4206 else if (adapter->function_mode & UMC_ENABLED)
4207 return "UMC";
4208 else
4209 return "";
4210 }
4211
4212 static inline char *func_name(struct be_adapter *adapter)
4213 {
4214 return be_physfn(adapter) ? "PF" : "VF";
4215 }
4216
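/* Probe: enable the PCI device, map BARs, sync with FW, allocate the
 * stats buffer, set up rings/queues and register the netdev
 */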
4217 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4218 {
4219 int status = 0;
4220 struct be_adapter *adapter;
4221 struct net_device *netdev;
4222 char port_name;
4223
4224 status = pci_enable_device(pdev);
4225 if (status)
4226 goto do_none;
4227
4228 status = pci_request_regions(pdev, DRV_NAME);
4229 if (status)
4230 goto disable_dev;
4231 pci_set_master(pdev);
4232
4233 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4234 if (netdev == NULL) {
4235 status = -ENOMEM;
4236 goto rel_reg;
4237 }
4238 adapter = netdev_priv(netdev);
4239 adapter->pdev = pdev;
4240 pci_set_drvdata(pdev, adapter);
4241 adapter->netdev = netdev;
4242 SET_NETDEV_DEV(netdev, &pdev->dev);
4243
4244 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4245 if (!status) {
4246 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4247 if (status < 0) {
4248 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4249 goto free_netdev;
4250 }
4251 netdev->features |= NETIF_F_HIGHDMA;
4252 } else {
4253 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4254 if (status) {
4255 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4256 goto free_netdev;
4257 }
4258 }
4259
4260 status = pci_enable_pcie_error_reporting(pdev);
4261 if (status)
4262 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4263
4264 status = be_ctrl_init(adapter);
4265 if (status)
4266 goto free_netdev;
4267
4268 /* sync up with fw's ready state */
4269 if (be_physfn(adapter)) {
4270 status = be_fw_wait_ready(adapter);
4271 if (status)
4272 goto ctrl_clean;
4273 }
4274
4275 if (be_reset_required(adapter)) {
4276 status = be_cmd_reset_function(adapter);
4277 if (status)
4278 goto ctrl_clean;
4279
4280 /* Wait for interrupts to quiesce after an FLR */
4281 msleep(100);
4282 }
4283
4284 /* Allow interrupts for other ULPs running on NIC function */
4285 be_intr_set(adapter, true);
4286
4287 /* tell fw we're ready to fire cmds */
4288 status = be_cmd_fw_init(adapter);
4289 if (status)
4290 goto ctrl_clean;
4291
4292 status = be_stats_init(adapter);
4293 if (status)
4294 goto ctrl_clean;
4295
4296 status = be_get_initial_config(adapter);
4297 if (status)
4298 goto stats_clean;
4299
4300 INIT_DELAYED_WORK(&adapter->work, be_worker);
4301 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4302 adapter->rx_fc = adapter->tx_fc = true;
4303
4304 status = be_setup(adapter);
4305 if (status)
4306 goto stats_clean;
4307
4308 be_netdev_init(netdev);
4309 status = register_netdev(netdev);
4310 if (status != 0)
4311 goto unsetup;
4312
4313 be_roce_dev_add(adapter);
4314
4315 schedule_delayed_work(&adapter->func_recovery_work,
4316 msecs_to_jiffies(1000));
4317
4318 be_cmd_query_port_name(adapter, &port_name);
4319
4320 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4321 func_name(adapter), mc_name(adapter), port_name);
4322
4323 return 0;
4324
4325 unsetup:
4326 be_clear(adapter);
4327 stats_clean:
4328 be_stats_cleanup(adapter);
4329 ctrl_clean:
4330 be_ctrl_cleanup(adapter);
4331 free_netdev:
4332 free_netdev(netdev);
4333 pci_set_drvdata(pdev, NULL);
4334 rel_reg:
4335 pci_release_regions(pdev);
4336 disable_dev:
4337 pci_disable_device(pdev);
4338 do_none:
4339 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4340 return status;
4341 }
4342
4343 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4344 {
4345 struct be_adapter *adapter = pci_get_drvdata(pdev);
4346 struct net_device *netdev = adapter->netdev;
4347
4348 if (adapter->wol)
4349 be_setup_wol(adapter, true);
4350
4351 cancel_delayed_work_sync(&adapter->func_recovery_work);
4352
4353 netif_device_detach(netdev);
4354 if (netif_running(netdev)) {
4355 rtnl_lock();
4356 be_close(netdev);
4357 rtnl_unlock();
4358 }
4359 be_clear(adapter);
4360
4361 pci_save_state(pdev);
4362 pci_disable_device(pdev);
4363 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4364 return 0;
4365 }
4366
4367 static int be_resume(struct pci_dev *pdev)
4368 {
4369 int status = 0;
4370 struct be_adapter *adapter = pci_get_drvdata(pdev);
4371 struct net_device *netdev = adapter->netdev;
4372
4373 netif_device_detach(netdev);
4374
4375 status = pci_enable_device(pdev);
4376 if (status)
4377 return status;
4378
4379 pci_set_power_state(pdev, 0);
4380 pci_restore_state(pdev);
4381
4382 /* tell fw we're ready to fire cmds */
4383 status = be_cmd_fw_init(adapter);
4384 if (status)
4385 return status;
4386
4387 be_setup(adapter);
4388 if (netif_running(netdev)) {
4389 rtnl_lock();
4390 be_open(netdev);
4391 rtnl_unlock();
4392 }
4393
4394 schedule_delayed_work(&adapter->func_recovery_work,
4395 msecs_to_jiffies(1000));
4396 netif_device_attach(netdev);
4397
4398 if (adapter->wol)
4399 be_setup_wol(adapter, false);
4400
4401 return 0;
4402 }
4403
4404 /* The function reset issued below has the effect of an FLR: it stops
4405  * BE from DMAing any further data.
4406  */
4407 static void be_shutdown(struct pci_dev *pdev)
4408 {
4409 struct be_adapter *adapter = pci_get_drvdata(pdev);
4410
4411 if (!adapter)
4412 return;
4413
4414 cancel_delayed_work_sync(&adapter->work);
4415 cancel_delayed_work_sync(&adapter->func_recovery_work);
4416
4417 netif_device_detach(adapter->netdev);
4418
4419 be_cmd_reset_function(adapter);
4420
4421 pci_disable_device(pdev);
4422 }
4423
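/* EEH: detach the netdev and quiesce the function so that the PCI
 * layer can reset the slot
 */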
4424 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4425 pci_channel_state_t state)
4426 {
4427 struct be_adapter *adapter = pci_get_drvdata(pdev);
4428 struct net_device *netdev = adapter->netdev;
4429
4430 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4431
4432 adapter->eeh_error = true;
4433
4434 cancel_delayed_work_sync(&adapter->func_recovery_work);
4435
4436 rtnl_lock();
4437 netif_device_detach(netdev);
4438 rtnl_unlock();
4439
4440 if (netif_running(netdev)) {
4441 rtnl_lock();
4442 be_close(netdev);
4443 rtnl_unlock();
4444 }
4445 be_clear(adapter);
4446
4447 if (state == pci_channel_io_perm_failure)
4448 return PCI_ERS_RESULT_DISCONNECT;
4449
4450 pci_disable_device(pdev);
4451
4452 /* The error could cause the FW to trigger a flash debug dump.
4453 * Resetting the card while flash dump is in progress
4454 * can cause it not to recover; wait for it to finish.
4455 * Wait only for first function as it is needed only once per
4456 * adapter.
4457 */
4458 if (pdev->devfn == 0)
4459 ssleep(30);
4460
4461 return PCI_ERS_RESULT_NEED_RESET;
4462 }
4463
4464 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4465 {
4466 struct be_adapter *adapter = pci_get_drvdata(pdev);
4467 int status;
4468
4469 dev_info(&adapter->pdev->dev, "EEH reset\n");
4470 be_clear_all_error(adapter);
4471
4472 status = pci_enable_device(pdev);
4473 if (status)
4474 return PCI_ERS_RESULT_DISCONNECT;
4475
4476 pci_set_master(pdev);
4477 pci_set_power_state(pdev, 0);
4478 pci_restore_state(pdev);
4479
4480 /* Check if card is ok and fw is ready */
4481 dev_info(&adapter->pdev->dev,
4482 "Waiting for FW to be ready after EEH reset\n");
4483 status = be_fw_wait_ready(adapter);
4484 if (status)
4485 return PCI_ERS_RESULT_DISCONNECT;
4486
4487 pci_cleanup_aer_uncorrect_error_status(pdev);
4488 return PCI_ERS_RESULT_RECOVERED;
4489 }
4490
4491 static void be_eeh_resume(struct pci_dev *pdev)
4492 {
4493 int status = 0;
4494 struct be_adapter *adapter = pci_get_drvdata(pdev);
4495 struct net_device *netdev = adapter->netdev;
4496
4497 dev_info(&adapter->pdev->dev, "EEH resume\n");
4498
4499 pci_save_state(pdev);
4500
4501 status = be_cmd_reset_function(adapter);
4502 if (status)
4503 goto err;
4504
4505 /* tell fw we're ready to fire cmds */
4506 status = be_cmd_fw_init(adapter);
4507 if (status)
4508 goto err;
4509
4510 status = be_setup(adapter);
4511 if (status)
4512 goto err;
4513
4514 if (netif_running(netdev)) {
4515 status = be_open(netdev);
4516 if (status)
4517 goto err;
4518 }
4519
4520 schedule_delayed_work(&adapter->func_recovery_work,
4521 msecs_to_jiffies(1000));
4522 netif_device_attach(netdev);
4523 return;
4524 err:
4525 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4526 }
4527
4528 static const struct pci_error_handlers be_eeh_handlers = {
4529 .error_detected = be_eeh_err_detected,
4530 .slot_reset = be_eeh_reset,
4531 .resume = be_eeh_resume,
4532 };
4533
4534 static struct pci_driver be_driver = {
4535 .name = DRV_NAME,
4536 .id_table = be_dev_ids,
4537 .probe = be_probe,
4538 .remove = be_remove,
4539 .suspend = be_suspend,
4540 .resume = be_resume,
4541 .shutdown = be_shutdown,
4542 .err_handler = &be_eeh_handlers
4543 };
4544
4545 static int __init be_init_module(void)
4546 {
4547 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4548 rx_frag_size != 2048) {
4549 printk(KERN_WARNING DRV_NAME
4550 " : Module param rx_frag_size must be 2048/4096/8192."
4551 " Using 2048\n");
4552 rx_frag_size = 2048;
4553 }
4554
4555 return pci_register_driver(&be_driver);
4556 }
4557 module_init(be_init_module);
4558
4559 static void __exit be_exit_module(void)
4560 {
4561 pci_unregister_driver(&be_driver);
4562 }
4563 module_exit(be_exit_module);