net: vlan: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_*
drivers/net/ethernet/emulex/benet/be_main.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
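/* The two tables below name each bit position of the UE (Unrecoverable
 * Error) status low/high CSRs; they are presumably indexed when decoding
 * and logging UE register dumps elsewhere in the driver.
 */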
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
111 "NETC",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131 struct be_dma_mem *mem = &q->dma_mem;
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141 {
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va)
151 return -ENOMEM;
152 return 0;
153 }
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157 u32 reg, enabled;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188 }
189
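/* Doorbell helpers: each packs the queue id into the low bits of a 32-bit
 * value, ORs in the count of posted (RQ/TXQ) or popped (EQ/CQ) entries at
 * the appropriate shift, and writes it to the corresponding doorbell offset.
 * The wmb() ensures the descriptor writes are visible to the device before
 * the doorbell is rung.
 */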
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196 wmb();
197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
201 {
202 u32 val = 0;
203 val |= qid & DB_TXULP_RING_ID_MASK;
204 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
205
206 wmb();
207 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
208 }
209
210 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
211 bool arm, bool clear_int, u16 num_popped)
212 {
213 u32 val = 0;
214 val |= qid & DB_EQ_RING_ID_MASK;
215 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
216 DB_EQ_RING_ID_EXT_MASK_SHIFT);
217
218 if (adapter->eeh_error)
219 return;
220
221 if (arm)
222 val |= 1 << DB_EQ_REARM_SHIFT;
223 if (clear_int)
224 val |= 1 << DB_EQ_CLR_SHIFT;
225 val |= 1 << DB_EQ_EVNT_SHIFT;
226 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
227 iowrite32(val, adapter->db + DB_EQ_OFFSET);
228 }
229
230 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
231 {
232 u32 val = 0;
233 val |= qid & DB_CQ_RING_ID_MASK;
234 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
235 DB_CQ_RING_ID_EXT_MASK_SHIFT);
236
237 if (adapter->eeh_error)
238 return;
239
240 if (arm)
241 val |= 1 << DB_CQ_REARM_SHIFT;
242 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
243 iowrite32(val, adapter->db + DB_CQ_OFFSET);
244 }
245
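/* ndo_set_mac_address handler: validates the address and, for BE VFs, only
 * verifies it against the MAC already provisioned by the PF. Otherwise the
 * new MAC is added as a pmac entry first and the old one deleted afterwards,
 * so the interface is never left without a valid filter; netdev->dev_addr is
 * updated only on success.
 */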
246 static int be_mac_addr_set(struct net_device *netdev, void *p)
247 {
248 struct be_adapter *adapter = netdev_priv(netdev);
249 struct sockaddr *addr = p;
250 int status = 0;
251 u8 current_mac[ETH_ALEN];
252 u32 pmac_id = adapter->pmac_id[0];
253 bool active_mac = true;
254
255 if (!is_valid_ether_addr(addr->sa_data))
256 return -EADDRNOTAVAIL;
257
258 /* For a BE VF, the MAC address is already activated by the PF.
259 * Hence the only operation left is updating netdev->dev_addr.
260 * Update it only if the user passes the same MAC that was
261 * configured for this VF by the PF (hypervisor).
262 */
263 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
264 status = be_cmd_mac_addr_query(adapter, current_mac,
265 false, adapter->if_handle, 0);
266 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
267 goto done;
268 else
269 goto err;
270 }
271
272 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
273 goto done;
274
275 /* For Lancer check if any MAC is active.
276 * If active, get its mac id.
277 */
278 if (lancer_chip(adapter) && !be_physfn(adapter))
279 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
280 &pmac_id, 0);
281
282 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
283 adapter->if_handle,
284 &adapter->pmac_id[0], 0);
285
286 if (status)
287 goto err;
288
289 if (active_mac)
290 be_cmd_pmac_del(adapter, adapter->if_handle,
291 pmac_id, 0);
292 done:
293 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
294 return 0;
295 err:
296 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
297 return status;
298 }
299
300 /* BE2 supports only v0 cmd */
301 static void *hw_stats_from_cmd(struct be_adapter *adapter)
302 {
303 if (BE2_chip(adapter)) {
304 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
305
306 return &cmd->hw_stats;
307 } else {
308 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
311 }
312 }
313
314 /* BE2 supports only v0 cmd */
315 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
316 {
317 if (BE2_chip(adapter)) {
318 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
319
320 return &hw_stats->erx;
321 } else {
322 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
323
324 return &hw_stats->erx;
325 }
326 }
327
328 static void populate_be_v0_stats(struct be_adapter *adapter)
329 {
330 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
331 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
332 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
333 struct be_port_rxf_stats_v0 *port_stats =
334 &rxf_stats->port[adapter->port_num];
335 struct be_drv_stats *drvs = &adapter->drv_stats;
336
337 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
338 drvs->rx_pause_frames = port_stats->rx_pause_frames;
339 drvs->rx_crc_errors = port_stats->rx_crc_errors;
340 drvs->rx_control_frames = port_stats->rx_control_frames;
341 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
342 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
343 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
344 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
345 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
346 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
347 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
348 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
349 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
350 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
351 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
352 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
353 drvs->rx_dropped_header_too_small =
354 port_stats->rx_dropped_header_too_small;
355 drvs->rx_address_mismatch_drops =
356 port_stats->rx_address_mismatch_drops +
357 port_stats->rx_vlan_mismatch_drops;
358 drvs->rx_alignment_symbol_errors =
359 port_stats->rx_alignment_symbol_errors;
360
361 drvs->tx_pauseframes = port_stats->tx_pauseframes;
362 drvs->tx_controlframes = port_stats->tx_controlframes;
363
364 if (adapter->port_num)
365 drvs->jabber_events = rxf_stats->port1_jabber_events;
366 else
367 drvs->jabber_events = rxf_stats->port0_jabber_events;
368 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
369 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
370 drvs->forwarded_packets = rxf_stats->forwarded_packets;
371 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
372 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
373 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
374 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
375 }
376
377 static void populate_be_v1_stats(struct be_adapter *adapter)
378 {
379 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
380 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
381 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
382 struct be_port_rxf_stats_v1 *port_stats =
383 &rxf_stats->port[adapter->port_num];
384 struct be_drv_stats *drvs = &adapter->drv_stats;
385
386 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
387 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
388 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
389 drvs->rx_pause_frames = port_stats->rx_pause_frames;
390 drvs->rx_crc_errors = port_stats->rx_crc_errors;
391 drvs->rx_control_frames = port_stats->rx_control_frames;
392 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
393 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
394 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
395 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
396 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
397 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
398 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
399 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
400 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
401 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
402 drvs->rx_dropped_header_too_small =
403 port_stats->rx_dropped_header_too_small;
404 drvs->rx_input_fifo_overflow_drop =
405 port_stats->rx_input_fifo_overflow_drop;
406 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
407 drvs->rx_alignment_symbol_errors =
408 port_stats->rx_alignment_symbol_errors;
409 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
410 drvs->tx_pauseframes = port_stats->tx_pauseframes;
411 drvs->tx_controlframes = port_stats->tx_controlframes;
412 drvs->jabber_events = port_stats->jabber_events;
413 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
414 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
415 drvs->forwarded_packets = rxf_stats->forwarded_packets;
416 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
417 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
418 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
419 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
420 }
421
422 static void populate_lancer_stats(struct be_adapter *adapter)
423 {
424
425 struct be_drv_stats *drvs = &adapter->drv_stats;
426 struct lancer_pport_stats *pport_stats =
427 pport_stats_from_cmd(adapter);
428
429 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
430 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
431 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
432 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
433 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
434 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
435 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
436 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
437 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
438 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
439 drvs->rx_dropped_tcp_length =
440 pport_stats->rx_dropped_invalid_tcp_length;
441 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
442 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
443 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
444 drvs->rx_dropped_header_too_small =
445 pport_stats->rx_dropped_header_too_small;
446 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
447 drvs->rx_address_mismatch_drops =
448 pport_stats->rx_address_mismatch_drops +
449 pport_stats->rx_vlan_mismatch_drops;
450 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
451 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
452 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
453 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
454 drvs->jabber_events = pport_stats->rx_jabbers;
455 drvs->forwarded_packets = pport_stats->num_forwards_lo;
456 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
457 drvs->rx_drops_too_many_frags =
458 pport_stats->rx_drops_too_many_frags_lo;
459 }
460
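/* The ERX drop counters below are only 16 bits wide in hardware and wrap at
 * 65535, so the driver folds each new reading into a 32-bit accumulator: the
 * high half of *acc counts completed wrap-arounds and the low half mirrors
 * the hardware value. Worked example: with *acc = 0x0001FFF0, a new reading
 * of 0x0005 is smaller than the previous low half (0xFFF0), so a wrap is
 * assumed and newacc = 0x00010000 + 0x0005 + 0x10000 = 0x00020005.
 */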
461 static void accumulate_16bit_val(u32 *acc, u16 val)
462 {
463 #define lo(x) (x & 0xFFFF)
464 #define hi(x) (x & 0xFFFF0000)
465 bool wrapped = val < lo(*acc);
466 u32 newacc = hi(*acc) + val;
467
468 if (wrapped)
469 newacc += 65536;
470 ACCESS_ONCE(*acc) = newacc;
471 }
472
473 void be_parse_stats(struct be_adapter *adapter)
474 {
475 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
476 struct be_rx_obj *rxo;
477 int i;
478
479 if (lancer_chip(adapter)) {
480 populate_lancer_stats(adapter);
481 } else {
482 if (BE2_chip(adapter))
483 populate_be_v0_stats(adapter);
484 else
485 /* for BE3 and Skyhawk */
486 populate_be_v1_stats(adapter);
487
488 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
489 for_all_rx_queues(adapter, rxo, i) {
490 /* below erx HW counter can actually wrap around after
491 * 65535. Driver accumulates a 32-bit value
492 */
493 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
494 (u16)erx->rx_drops_no_fragments
495 [rxo->q.id]);
496 }
497 }
498 }
499
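/* ndo_get_stats64: snapshots the per-queue SW counters under the u64_stats
 * seqcount (so 64-bit reads stay consistent on 32-bit hosts), sums them into
 * the rtnl_link_stats64 structure, and then derives the rx_errors and
 * rx_fifo_errors breakdown from the firmware-provided drv_stats.
 */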
500 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
501 struct rtnl_link_stats64 *stats)
502 {
503 struct be_adapter *adapter = netdev_priv(netdev);
504 struct be_drv_stats *drvs = &adapter->drv_stats;
505 struct be_rx_obj *rxo;
506 struct be_tx_obj *txo;
507 u64 pkts, bytes;
508 unsigned int start;
509 int i;
510
511 for_all_rx_queues(adapter, rxo, i) {
512 const struct be_rx_stats *rx_stats = rx_stats(rxo);
513 do {
514 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
515 pkts = rx_stats(rxo)->rx_pkts;
516 bytes = rx_stats(rxo)->rx_bytes;
517 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
518 stats->rx_packets += pkts;
519 stats->rx_bytes += bytes;
520 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
521 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
522 rx_stats(rxo)->rx_drops_no_frags;
523 }
524
525 for_all_tx_queues(adapter, txo, i) {
526 const struct be_tx_stats *tx_stats = tx_stats(txo);
527 do {
528 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
529 pkts = tx_stats(txo)->tx_pkts;
530 bytes = tx_stats(txo)->tx_bytes;
531 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
532 stats->tx_packets += pkts;
533 stats->tx_bytes += bytes;
534 }
535
536 /* bad pkts received */
537 stats->rx_errors = drvs->rx_crc_errors +
538 drvs->rx_alignment_symbol_errors +
539 drvs->rx_in_range_errors +
540 drvs->rx_out_range_errors +
541 drvs->rx_frame_too_long +
542 drvs->rx_dropped_too_small +
543 drvs->rx_dropped_too_short +
544 drvs->rx_dropped_header_too_small +
545 drvs->rx_dropped_tcp_length +
546 drvs->rx_dropped_runt;
547
548 /* detailed rx errors */
549 stats->rx_length_errors = drvs->rx_in_range_errors +
550 drvs->rx_out_range_errors +
551 drvs->rx_frame_too_long;
552
553 stats->rx_crc_errors = drvs->rx_crc_errors;
554
555 /* frame alignment errors */
556 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
557
558 /* receiver fifo overrun */
559 /* rx_drops_no_pbuf is not per-interface; it is per BE card */
560 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
561 drvs->rx_input_fifo_overflow_drop +
562 drvs->rx_drops_no_pbuf;
563 return stats;
564 }
565
566 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
567 {
568 struct net_device *netdev = adapter->netdev;
569
570 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
571 netif_carrier_off(netdev);
572 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
573 }
574
575 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
576 netif_carrier_on(netdev);
577 else
578 netif_carrier_off(netdev);
579 }
580
581 static void be_tx_stats_update(struct be_tx_obj *txo,
582 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
583 {
584 struct be_tx_stats *stats = tx_stats(txo);
585
586 u64_stats_update_begin(&stats->sync);
587 stats->tx_reqs++;
588 stats->tx_wrbs += wrb_cnt;
589 stats->tx_bytes += copied;
590 stats->tx_pkts += (gso_segs ? gso_segs : 1);
591 if (stopped)
592 stats->tx_stops++;
593 u64_stats_update_end(&stats->sync);
594 }
595
596 /* Determine number of WRB entries needed to xmit data in an skb */
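/* One WRB is needed per page fragment, plus one for the linear part (if any)
 * and one for the header. Example: a linear skb with two page frags needs
 * 1 (hdr) + 1 (linear) + 2 (frags) = 4 WRBs. On non-Lancer chips an odd
 * count gets a dummy WRB appended to keep the total even.
 */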
597 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
598 bool *dummy)
599 {
600 int cnt = (skb->len > skb->data_len);
601
602 cnt += skb_shinfo(skb)->nr_frags;
603
604 /* to account for hdr wrb */
605 cnt++;
606 if (lancer_chip(adapter) || !(cnt & 1)) {
607 *dummy = false;
608 } else {
609 /* add a dummy to make it an even num */
610 cnt++;
611 *dummy = true;
612 }
613 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
614 return cnt;
615 }
616
617 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
618 {
619 wrb->frag_pa_hi = upper_32_bits(addr);
620 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
621 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
622 wrb->rsvd0 = 0;
623 }
624
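/* Returns the VLAN tag to place in the TX header WRB. If the 802.1p priority
 * requested by the stack is not set in the adapter's allowed priority bitmap,
 * it is replaced with the firmware-recommended priority while the VID bits
 * are preserved.
 */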
625 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
626 struct sk_buff *skb)
627 {
628 u8 vlan_prio;
629 u16 vlan_tag;
630
631 vlan_tag = vlan_tx_tag_get(skb);
632 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
633 /* If vlan priority provided by OS is NOT in available bmap */
634 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
635 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
636 adapter->recommended_prio;
637
638 return vlan_tag;
639 }
640
641 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
642 {
643 return vlan_tx_tag_present(skb) || adapter->pvid;
644 }
645
646 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
647 struct sk_buff *skb, u32 wrb_cnt, u32 len)
648 {
649 u16 vlan_tag;
650
651 memset(hdr, 0, sizeof(*hdr));
652
653 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
654
655 if (skb_is_gso(skb)) {
656 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
657 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
658 hdr, skb_shinfo(skb)->gso_size);
659 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
660 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
661 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
662 if (is_tcp_pkt(skb))
663 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
664 else if (is_udp_pkt(skb))
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
666 }
667
668 if (vlan_tx_tag_present(skb)) {
669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
670 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
672 }
673
674 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
675 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
676 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
678 }
679
680 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
681 bool unmap_single)
682 {
683 dma_addr_t dma;
684
685 be_dws_le_to_cpu(wrb, sizeof(*wrb));
686
687 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
688 if (wrb->frag_len) {
689 if (unmap_single)
690 dma_unmap_single(dev, dma, wrb->frag_len,
691 DMA_TO_DEVICE);
692 else
693 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
694 }
695 }
696
697 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
698 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
699 {
700 dma_addr_t busaddr;
701 int i, copied = 0;
702 struct device *dev = &adapter->pdev->dev;
703 struct sk_buff *first_skb = skb;
704 struct be_eth_wrb *wrb;
705 struct be_eth_hdr_wrb *hdr;
706 bool map_single = false;
707 u16 map_head;
708
709 hdr = queue_head_node(txq);
710 queue_head_inc(txq);
711 map_head = txq->head;
712
713 if (skb->len > skb->data_len) {
714 int len = skb_headlen(skb);
715 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
716 if (dma_mapping_error(dev, busaddr))
717 goto dma_err;
718 map_single = true;
719 wrb = queue_head_node(txq);
720 wrb_fill(wrb, busaddr, len);
721 be_dws_cpu_to_le(wrb, sizeof(*wrb));
722 queue_head_inc(txq);
723 copied += len;
724 }
725
726 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
727 const struct skb_frag_struct *frag =
728 &skb_shinfo(skb)->frags[i];
729 busaddr = skb_frag_dma_map(dev, frag, 0,
730 skb_frag_size(frag), DMA_TO_DEVICE);
731 if (dma_mapping_error(dev, busaddr))
732 goto dma_err;
733 wrb = queue_head_node(txq);
734 wrb_fill(wrb, busaddr, skb_frag_size(frag));
735 be_dws_cpu_to_le(wrb, sizeof(*wrb));
736 queue_head_inc(txq);
737 copied += skb_frag_size(frag);
738 }
739
740 if (dummy_wrb) {
741 wrb = queue_head_node(txq);
742 wrb_fill(wrb, 0, 0);
743 be_dws_cpu_to_le(wrb, sizeof(*wrb));
744 queue_head_inc(txq);
745 }
746
747 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
748 be_dws_cpu_to_le(hdr, sizeof(*hdr));
749
750 return copied;
751 dma_err:
752 txq->head = map_head;
753 while (copied) {
754 wrb = queue_head_node(txq);
755 unmap_tx_frag(dev, wrb, map_single);
756 map_single = false;
757 copied -= wrb->frag_len;
758 queue_head_inc(txq);
759 }
760 return 0;
761 }
762
763 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
764 struct sk_buff *skb)
765 {
766 u16 vlan_tag = 0;
767
768 skb = skb_share_check(skb, GFP_ATOMIC);
769 if (unlikely(!skb))
770 return skb;
771
772 if (vlan_tx_tag_present(skb)) {
773 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
774 __vlan_put_tag(skb, vlan_tag);
775 skb->vlan_tci = 0;
776 }
777
778 return skb;
779 }
780
781 static netdev_tx_t be_xmit(struct sk_buff *skb,
782 struct net_device *netdev)
783 {
784 struct be_adapter *adapter = netdev_priv(netdev);
785 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
786 struct be_queue_info *txq = &txo->q;
787 struct iphdr *ip = NULL;
788 u32 wrb_cnt = 0, copied = 0;
789 u32 start = txq->head, eth_hdr_len;
790 bool dummy_wrb, stopped = false;
791
792 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
793 VLAN_ETH_HLEN : ETH_HLEN;
794
795 /* HW has a bug which considers padding bytes as legal
796 * and modifies the IPv4 hdr's 'tot_len' field
797 */
798 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
799 is_ipv4_pkt(skb)) {
800 ip = (struct iphdr *)ip_hdr(skb);
801 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
802 }
803
804 /* HW has a bug wherein it will calculate CSUM for VLAN
805 * pkts even though it is disabled.
806 * Manually insert VLAN in pkt.
807 */
808 if (skb->ip_summed != CHECKSUM_PARTIAL &&
809 be_vlan_tag_chk(adapter, skb)) {
810 skb = be_insert_vlan_in_pkt(adapter, skb);
811 if (unlikely(!skb))
812 goto tx_drop;
813 }
814
815 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
816
817 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
818 if (copied) {
819 int gso_segs = skb_shinfo(skb)->gso_segs;
820
821 /* record the sent skb in the sent_skb table */
822 BUG_ON(txo->sent_skb_list[start]);
823 txo->sent_skb_list[start] = skb;
824
825 /* Ensure txq has space for the next skb; Else stop the queue
826 * *BEFORE* ringing the tx doorbell, so that we serialize the
827 * tx compls of the current transmit which'll wake up the queue
828 */
829 atomic_add(wrb_cnt, &txq->used);
830 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
831 txq->len) {
832 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
833 stopped = true;
834 }
835
836 be_txq_notify(adapter, txq->id, wrb_cnt);
837
838 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
839 } else {
840 txq->head = start;
841 dev_kfree_skb_any(skb);
842 }
843 tx_drop:
844 return NETDEV_TX_OK;
845 }
846
847 static int be_change_mtu(struct net_device *netdev, int new_mtu)
848 {
849 struct be_adapter *adapter = netdev_priv(netdev);
850 if (new_mtu < BE_MIN_MTU ||
851 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
852 (ETH_HLEN + ETH_FCS_LEN))) {
853 dev_info(&adapter->pdev->dev,
854 "MTU must be between %d and %d bytes\n",
855 BE_MIN_MTU,
856 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
857 return -EINVAL;
858 }
859 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
860 netdev->mtu, new_mtu);
861 netdev->mtu = new_mtu;
862 return 0;
863 }
864
865 /*
866 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
867 * If the user configures more, place BE in vlan promiscuous mode.
868 */
869 static int be_vid_config(struct be_adapter *adapter)
870 {
871 u16 vids[BE_NUM_VLANS_SUPPORTED];
872 u16 num = 0, i;
873 int status = 0;
874
875 /* No need to further configure vids if in promiscuous mode */
876 if (adapter->promiscuous)
877 return 0;
878
879 if (adapter->vlans_added > adapter->max_vlans)
880 goto set_vlan_promisc;
881
882 /* Construct VLAN Table to give to HW */
883 for (i = 0; i < VLAN_N_VID; i++)
884 if (adapter->vlan_tag[i])
885 vids[num++] = cpu_to_le16(i);
886
887 status = be_cmd_vlan_config(adapter, adapter->if_handle,
888 vids, num, 1, 0);
889
890 /* Set to VLAN promisc mode as setting VLAN filter failed */
891 if (status) {
892 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
893 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
894 goto set_vlan_promisc;
895 }
896
897 return status;
898
899 set_vlan_promisc:
900 status = be_cmd_vlan_config(adapter, adapter->if_handle,
901 NULL, 0, 1, 1);
902 return status;
903 }
904
905 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
906 {
907 struct be_adapter *adapter = netdev_priv(netdev);
908 int status = 0;
909
910 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
911 status = -EINVAL;
912 goto ret;
913 }
914
915 /* Packets with VID 0 are always received by Lancer by default */
916 if (lancer_chip(adapter) && vid == 0)
917 goto ret;
918
919 adapter->vlan_tag[vid] = 1;
920 if (adapter->vlans_added <= (adapter->max_vlans + 1))
921 status = be_vid_config(adapter);
922
923 if (!status)
924 adapter->vlans_added++;
925 else
926 adapter->vlan_tag[vid] = 0;
927 ret:
928 return status;
929 }
930
931 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
932 {
933 struct be_adapter *adapter = netdev_priv(netdev);
934 int status = 0;
935
936 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
937 status = -EINVAL;
938 goto ret;
939 }
940
941 /* Packets with VID 0 are always received by Lancer by default */
942 if (lancer_chip(adapter) && vid == 0)
943 goto ret;
944
945 adapter->vlan_tag[vid] = 0;
946 if (adapter->vlans_added <= adapter->max_vlans)
947 status = be_vid_config(adapter);
948
949 if (!status)
950 adapter->vlans_added--;
951 else
952 adapter->vlan_tag[vid] = 1;
953 ret:
954 return status;
955 }
956
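/* ndo_set_rx_mode: programs the RX filters in this order - enter promiscuous
 * mode if requested; otherwise leave promiscuous mode (re-applying the VLAN
 * table); fall back to all-multicast when the multicast list exceeds the HW
 * limit; re-program the secondary unicast MACs (slot 0 is the primary MAC);
 * and finally push the multicast list, degrading to all-multicast if the
 * firmware rejects it.
 */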
957 static void be_set_rx_mode(struct net_device *netdev)
958 {
959 struct be_adapter *adapter = netdev_priv(netdev);
960 int status;
961
962 if (netdev->flags & IFF_PROMISC) {
963 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
964 adapter->promiscuous = true;
965 goto done;
966 }
967
968 /* BE was previously in promiscuous mode; disable it */
969 if (adapter->promiscuous) {
970 adapter->promiscuous = false;
971 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
972
973 if (adapter->vlans_added)
974 be_vid_config(adapter);
975 }
976
977 /* Enable multicast promisc if num configured exceeds what we support */
978 if (netdev->flags & IFF_ALLMULTI ||
979 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
980 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
981 goto done;
982 }
983
984 if (netdev_uc_count(netdev) != adapter->uc_macs) {
985 struct netdev_hw_addr *ha;
986 int i = 1; /* First slot is claimed by the Primary MAC */
987
988 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
989 be_cmd_pmac_del(adapter, adapter->if_handle,
990 adapter->pmac_id[i], 0);
991 }
992
993 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
994 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
995 adapter->promiscuous = true;
996 goto done;
997 }
998
999 netdev_for_each_uc_addr(ha, adapter->netdev) {
1000 adapter->uc_macs++; /* First slot is for Primary MAC */
1001 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1002 adapter->if_handle,
1003 &adapter->pmac_id[adapter->uc_macs], 0);
1004 }
1005 }
1006
1007 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1008
1009 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1010 if (status) {
1011 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1012 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1013 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1014 }
1015 done:
1016 return;
1017 }
1018
1019 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1020 {
1021 struct be_adapter *adapter = netdev_priv(netdev);
1022 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1023 int status;
1024 bool active_mac = false;
1025 u32 pmac_id;
1026 u8 old_mac[ETH_ALEN];
1027
1028 if (!sriov_enabled(adapter))
1029 return -EPERM;
1030
1031 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1032 return -EINVAL;
1033
1034 if (lancer_chip(adapter)) {
1035 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1036 &pmac_id, vf + 1);
1037 if (!status && active_mac)
1038 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1039 pmac_id, vf + 1);
1040
1041 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1042 } else {
1043 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1044 vf_cfg->pmac_id, vf + 1);
1045
1046 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1047 &vf_cfg->pmac_id, vf + 1);
1048 }
1049
1050 if (status)
1051 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1052 mac, vf);
1053 else
1054 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1055
1056 return status;
1057 }
1058
1059 static int be_get_vf_config(struct net_device *netdev, int vf,
1060 struct ifla_vf_info *vi)
1061 {
1062 struct be_adapter *adapter = netdev_priv(netdev);
1063 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1064
1065 if (!sriov_enabled(adapter))
1066 return -EPERM;
1067
1068 if (vf >= adapter->num_vfs)
1069 return -EINVAL;
1070
1071 vi->vf = vf;
1072 vi->tx_rate = vf_cfg->tx_rate;
1073 vi->vlan = vf_cfg->vlan_tag;
1074 vi->qos = 0;
1075 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1076
1077 return 0;
1078 }
1079
1080 static int be_set_vf_vlan(struct net_device *netdev,
1081 int vf, u16 vlan, u8 qos)
1082 {
1083 struct be_adapter *adapter = netdev_priv(netdev);
1084 int status = 0;
1085
1086 if (!sriov_enabled(adapter))
1087 return -EPERM;
1088
1089 if (vf >= adapter->num_vfs || vlan > 4095)
1090 return -EINVAL;
1091
1092 if (vlan) {
1093 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1094 /* If this is new value, program it. Else skip. */
1095 adapter->vf_cfg[vf].vlan_tag = vlan;
1096
1097 status = be_cmd_set_hsw_config(adapter, vlan,
1098 vf + 1, adapter->vf_cfg[vf].if_handle);
1099 }
1100 } else {
1101 /* Reset Transparent Vlan Tagging. */
1102 adapter->vf_cfg[vf].vlan_tag = 0;
1103 vlan = adapter->vf_cfg[vf].def_vid;
1104 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1105 adapter->vf_cfg[vf].if_handle);
1106 }
1107
1108
1109 if (status)
1110 dev_info(&adapter->pdev->dev,
1111 "VLAN %d config on VF %d failed\n", vlan, vf);
1112 return status;
1113 }
1114
1115 static int be_set_vf_tx_rate(struct net_device *netdev,
1116 int vf, int rate)
1117 {
1118 struct be_adapter *adapter = netdev_priv(netdev);
1119 int status = 0;
1120
1121 if (!sriov_enabled(adapter))
1122 return -EPERM;
1123
1124 if (vf >= adapter->num_vfs)
1125 return -EINVAL;
1126
1127 if (rate < 100 || rate > 10000) {
1128 dev_err(&adapter->pdev->dev,
1129 "tx rate must be between 100 and 10000 Mbps\n");
1130 return -EINVAL;
1131 }
1132
1133 if (lancer_chip(adapter))
1134 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1135 else
1136 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1137
1138 if (status)
1139 dev_err(&adapter->pdev->dev,
1140 "tx rate %d on VF %d failed\n", rate, vf);
1141 else
1142 adapter->vf_cfg[vf].tx_rate = rate;
1143 return status;
1144 }
1145
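/* Walks the PCI bus for virtual functions whose physical function is this
 * adapter and returns either the total VF count or only the VFs currently
 * assigned to a guest, depending on vf_state.
 */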
1146 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1147 {
1148 struct pci_dev *dev, *pdev = adapter->pdev;
1149 int vfs = 0, assigned_vfs = 0, pos;
1150 u16 offset, stride;
1151
1152 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1153 if (!pos)
1154 return 0;
1155 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1156 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1157
1158 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1159 while (dev) {
1160 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1161 vfs++;
1162 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1163 assigned_vfs++;
1164 }
1165 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1166 }
1167 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1168 }
1169
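/* Adaptive interrupt coalescing: once per second the RX packet rate is
 * sampled and a new EQ delay is derived as eqd = (pps / 110000) << 3,
 * clamped to [min_eqd, max_eqd] and forced to 0 below 10. For example,
 * 440,000 pkts/s gives (440000 / 110000) << 3 = 32. The value is written
 * to the EQ only when it differs from the currently programmed delay.
 */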
1170 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1171 {
1172 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1173 ulong now = jiffies;
1174 ulong delta = now - stats->rx_jiffies;
1175 u64 pkts;
1176 unsigned int start, eqd;
1177
1178 if (!eqo->enable_aic) {
1179 eqd = eqo->eqd;
1180 goto modify_eqd;
1181 }
1182
1183 if (eqo->idx >= adapter->num_rx_qs)
1184 return;
1185
1186 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1187
1188 /* Wrapped around */
1189 if (time_before(now, stats->rx_jiffies)) {
1190 stats->rx_jiffies = now;
1191 return;
1192 }
1193
1194 /* Update once a second */
1195 if (delta < HZ)
1196 return;
1197
1198 do {
1199 start = u64_stats_fetch_begin_bh(&stats->sync);
1200 pkts = stats->rx_pkts;
1201 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1202
1203 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1204 stats->rx_pkts_prev = pkts;
1205 stats->rx_jiffies = now;
1206 eqd = (stats->rx_pps / 110000) << 3;
1207 eqd = min(eqd, eqo->max_eqd);
1208 eqd = max(eqd, eqo->min_eqd);
1209 if (eqd < 10)
1210 eqd = 0;
1211
1212 modify_eqd:
1213 if (eqd != eqo->cur_eqd) {
1214 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1215 eqo->cur_eqd = eqd;
1216 }
1217 }
1218
1219 static void be_rx_stats_update(struct be_rx_obj *rxo,
1220 struct be_rx_compl_info *rxcp)
1221 {
1222 struct be_rx_stats *stats = rx_stats(rxo);
1223
1224 u64_stats_update_begin(&stats->sync);
1225 stats->rx_compl++;
1226 stats->rx_bytes += rxcp->pkt_size;
1227 stats->rx_pkts++;
1228 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1229 stats->rx_mcast_pkts++;
1230 if (rxcp->err)
1231 stats->rx_compl_err++;
1232 u64_stats_update_end(&stats->sync);
1233 }
1234
1235 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1236 {
1237 /* L4 checksum is not reliable for non-TCP/UDP packets.
1238 * Also ignore ipcksm for ipv6 pkts */
1239 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1240 (rxcp->ip_csum || rxcp->ipv6);
1241 }
1242
1243 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1244 u16 frag_idx)
1245 {
1246 struct be_adapter *adapter = rxo->adapter;
1247 struct be_rx_page_info *rx_page_info;
1248 struct be_queue_info *rxq = &rxo->q;
1249
1250 rx_page_info = &rxo->page_info_tbl[frag_idx];
1251 BUG_ON(!rx_page_info->page);
1252
1253 if (rx_page_info->last_page_user) {
1254 dma_unmap_page(&adapter->pdev->dev,
1255 dma_unmap_addr(rx_page_info, bus),
1256 adapter->big_page_size, DMA_FROM_DEVICE);
1257 rx_page_info->last_page_user = false;
1258 }
1259
1260 atomic_dec(&rxq->used);
1261 return rx_page_info;
1262 }
1263
1264 /* Throw away the data in the Rx completion */
1265 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1266 struct be_rx_compl_info *rxcp)
1267 {
1268 struct be_queue_info *rxq = &rxo->q;
1269 struct be_rx_page_info *page_info;
1270 u16 i, num_rcvd = rxcp->num_rcvd;
1271
1272 for (i = 0; i < num_rcvd; i++) {
1273 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1274 put_page(page_info->page);
1275 memset(page_info, 0, sizeof(*page_info));
1276 index_inc(&rxcp->rxq_idx, rxq->len);
1277 }
1278 }
1279
1280 /*
1281 * skb_fill_rx_data forms a complete skb for an ether frame
1282 * indicated by rxcp.
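* Tiny frames (up to BE_HDR_LEN) are copied entirely into the linear area;
* larger frames copy only the Ethernet header and attach the remaining
* rx_frag_size fragments as page frags, coalescing consecutive fragments
* that share a physical page into a single slot.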
1283 */
1284 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1285 struct be_rx_compl_info *rxcp)
1286 {
1287 struct be_queue_info *rxq = &rxo->q;
1288 struct be_rx_page_info *page_info;
1289 u16 i, j;
1290 u16 hdr_len, curr_frag_len, remaining;
1291 u8 *start;
1292
1293 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1294 start = page_address(page_info->page) + page_info->page_offset;
1295 prefetch(start);
1296
1297 /* Copy data in the first descriptor of this completion */
1298 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1299
1300 skb->len = curr_frag_len;
1301 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1302 memcpy(skb->data, start, curr_frag_len);
1303 /* Complete packet has now been moved to data */
1304 put_page(page_info->page);
1305 skb->data_len = 0;
1306 skb->tail += curr_frag_len;
1307 } else {
1308 hdr_len = ETH_HLEN;
1309 memcpy(skb->data, start, hdr_len);
1310 skb_shinfo(skb)->nr_frags = 1;
1311 skb_frag_set_page(skb, 0, page_info->page);
1312 skb_shinfo(skb)->frags[0].page_offset =
1313 page_info->page_offset + hdr_len;
1314 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1315 skb->data_len = curr_frag_len - hdr_len;
1316 skb->truesize += rx_frag_size;
1317 skb->tail += hdr_len;
1318 }
1319 page_info->page = NULL;
1320
1321 if (rxcp->pkt_size <= rx_frag_size) {
1322 BUG_ON(rxcp->num_rcvd != 1);
1323 return;
1324 }
1325
1326 /* More frags present for this completion */
1327 index_inc(&rxcp->rxq_idx, rxq->len);
1328 remaining = rxcp->pkt_size - curr_frag_len;
1329 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1330 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1331 curr_frag_len = min(remaining, rx_frag_size);
1332
1333 /* Coalesce all frags from the same physical page in one slot */
1334 if (page_info->page_offset == 0) {
1335 /* Fresh page */
1336 j++;
1337 skb_frag_set_page(skb, j, page_info->page);
1338 skb_shinfo(skb)->frags[j].page_offset =
1339 page_info->page_offset;
1340 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1341 skb_shinfo(skb)->nr_frags++;
1342 } else {
1343 put_page(page_info->page);
1344 }
1345
1346 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1347 skb->len += curr_frag_len;
1348 skb->data_len += curr_frag_len;
1349 skb->truesize += rx_frag_size;
1350 remaining -= curr_frag_len;
1351 index_inc(&rxcp->rxq_idx, rxq->len);
1352 page_info->page = NULL;
1353 }
1354 BUG_ON(j > MAX_SKB_FRAGS);
1355 }
1356
1357 /* Process the RX completion indicated by rxcp when GRO is disabled */
1358 static void be_rx_compl_process(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
1360 {
1361 struct be_adapter *adapter = rxo->adapter;
1362 struct net_device *netdev = adapter->netdev;
1363 struct sk_buff *skb;
1364
1365 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1366 if (unlikely(!skb)) {
1367 rx_stats(rxo)->rx_drops_no_skbs++;
1368 be_rx_compl_discard(rxo, rxcp);
1369 return;
1370 }
1371
1372 skb_fill_rx_data(rxo, skb, rxcp);
1373
1374 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1375 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376 else
1377 skb_checksum_none_assert(skb);
1378
1379 skb->protocol = eth_type_trans(skb, netdev);
1380 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1381 if (netdev->features & NETIF_F_RXHASH)
1382 skb->rxhash = rxcp->rss_hash;
1383
1384
1385 if (rxcp->vlanf)
1386 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1387
1388 netif_receive_skb(skb);
1389 }
1390
1391 /* Process the RX completion indicated by rxcp when GRO is enabled */
1392 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1393 struct be_rx_compl_info *rxcp)
1394 {
1395 struct be_adapter *adapter = rxo->adapter;
1396 struct be_rx_page_info *page_info;
1397 struct sk_buff *skb = NULL;
1398 struct be_queue_info *rxq = &rxo->q;
1399 u16 remaining, curr_frag_len;
1400 u16 i, j;
1401
1402 skb = napi_get_frags(napi);
1403 if (!skb) {
1404 be_rx_compl_discard(rxo, rxcp);
1405 return;
1406 }
1407
1408 remaining = rxcp->pkt_size;
1409 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1410 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1411
1412 curr_frag_len = min(remaining, rx_frag_size);
1413
1414 /* Coalesce all frags from the same physical page in one slot */
1415 if (i == 0 || page_info->page_offset == 0) {
1416 /* First frag or Fresh page */
1417 j++;
1418 skb_frag_set_page(skb, j, page_info->page);
1419 skb_shinfo(skb)->frags[j].page_offset =
1420 page_info->page_offset;
1421 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1422 } else {
1423 put_page(page_info->page);
1424 }
1425 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1426 skb->truesize += rx_frag_size;
1427 remaining -= curr_frag_len;
1428 index_inc(&rxcp->rxq_idx, rxq->len);
1429 memset(page_info, 0, sizeof(*page_info));
1430 }
1431 BUG_ON(j > MAX_SKB_FRAGS);
1432
1433 skb_shinfo(skb)->nr_frags = j + 1;
1434 skb->len = rxcp->pkt_size;
1435 skb->data_len = rxcp->pkt_size;
1436 skb->ip_summed = CHECKSUM_UNNECESSARY;
1437 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1438 if (adapter->netdev->features & NETIF_F_RXHASH)
1439 skb->rxhash = rxcp->rss_hash;
1440
1441 if (rxcp->vlanf)
1442 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1443
1444 napi_gro_frags(napi);
1445 }
1446
1447 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1448 struct be_rx_compl_info *rxcp)
1449 {
1450 rxcp->pkt_size =
1451 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1452 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1453 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1454 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1455 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1456 rxcp->ip_csum =
1457 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1458 rxcp->l4_csum =
1459 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1460 rxcp->ipv6 =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1462 rxcp->rxq_idx =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1464 rxcp->num_rcvd =
1465 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1466 rxcp->pkt_type =
1467 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1468 rxcp->rss_hash =
1469 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1470 if (rxcp->vlanf) {
1471 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1472 compl);
1473 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1474 compl);
1475 }
1476 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1477 }
1478
1479 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1480 struct be_rx_compl_info *rxcp)
1481 {
1482 rxcp->pkt_size =
1483 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1484 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1485 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1486 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1487 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1488 rxcp->ip_csum =
1489 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1490 rxcp->l4_csum =
1491 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1492 rxcp->ipv6 =
1493 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1494 rxcp->rxq_idx =
1495 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1496 rxcp->num_rcvd =
1497 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1498 rxcp->pkt_type =
1499 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1500 rxcp->rss_hash =
1501 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1502 if (rxcp->vlanf) {
1503 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1504 compl);
1505 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1506 compl);
1507 }
1508 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1509 }
1510
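/* Returns the next RX completion from the CQ, or NULL if the entry at the
 * tail is not yet valid. A valid entry is converted to CPU order, parsed
 * with the v1 layout on be3_native adapters and v0 otherwise, has its VLAN
 * quirks applied (vtm check in FLEX10 mode, byte-swap on BE chips, pvid
 * filtering), and its valid bit is then cleared so it is not processed twice.
 */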
1511 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1512 {
1513 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1514 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1515 struct be_adapter *adapter = rxo->adapter;
1516
1517 /* For checking the valid bit it is Ok to use either definition as the
1518 * valid bit is at the same position in both v0 and v1 Rx compl */
1519 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1520 return NULL;
1521
1522 rmb();
1523 be_dws_le_to_cpu(compl, sizeof(*compl));
1524
1525 if (adapter->be3_native)
1526 be_parse_rx_compl_v1(compl, rxcp);
1527 else
1528 be_parse_rx_compl_v0(compl, rxcp);
1529
1530 if (rxcp->vlanf) {
1531 /* vlanf could be wrongly set in some cards.
1532 * ignore if vtm is not set */
1533 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1534 rxcp->vlanf = 0;
1535
1536 if (!lancer_chip(adapter))
1537 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1538
1539 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1540 !adapter->vlan_tag[rxcp->vlan_tag])
1541 rxcp->vlanf = 0;
1542 }
1543
1544 /* As the compl has been parsed, reset it; we won't touch it again */
1545 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1546
1547 queue_tail_inc(&rxo->cq);
1548 return rxcp;
1549 }
1550
1551 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1552 {
1553 u32 order = get_order(size);
1554
1555 if (order > 0)
1556 gfp |= __GFP_COMP;
1557 return alloc_pages(gfp, order);
1558 }
1559
1560 /*
1561 * Allocate a page, split it to fragments of size rx_frag_size and post as
1562 * receive buffers to BE
1563 */
1564 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1565 {
1566 struct be_adapter *adapter = rxo->adapter;
1567 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1568 struct be_queue_info *rxq = &rxo->q;
1569 struct page *pagep = NULL;
1570 struct be_eth_rx_d *rxd;
1571 u64 page_dmaaddr = 0, frag_dmaaddr;
1572 u32 posted, page_offset = 0;
1573
1574 page_info = &rxo->page_info_tbl[rxq->head];
1575 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1576 if (!pagep) {
1577 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1578 if (unlikely(!pagep)) {
1579 rx_stats(rxo)->rx_post_fail++;
1580 break;
1581 }
1582 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1583 0, adapter->big_page_size,
1584 DMA_FROM_DEVICE);
1585 page_info->page_offset = 0;
1586 } else {
1587 get_page(pagep);
1588 page_info->page_offset = page_offset + rx_frag_size;
1589 }
1590 page_offset = page_info->page_offset;
1591 page_info->page = pagep;
1592 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1593 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1594
1595 rxd = queue_head_node(rxq);
1596 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1597 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1598
1599 /* Any space left in the current big page for another frag? */
1600 if ((page_offset + rx_frag_size + rx_frag_size) >
1601 adapter->big_page_size) {
1602 pagep = NULL;
1603 page_info->last_page_user = true;
1604 }
1605
1606 prev_page_info = page_info;
1607 queue_head_inc(rxq);
1608 page_info = &rxo->page_info_tbl[rxq->head];
1609 }
1610 if (pagep)
1611 prev_page_info->last_page_user = true;
1612
1613 if (posted) {
1614 atomic_add(posted, &rxq->used);
1615 be_rxq_notify(adapter, rxq->id, posted);
1616 } else if (atomic_read(&rxq->used) == 0) {
1617 /* Let be_worker replenish when memory is available */
1618 rxo->rx_post_starved = true;
1619 }
1620 }
1621
1622 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1623 {
1624 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1625
1626 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1627 return NULL;
1628
1629 rmb();
1630 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1631
1632 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1633
1634 queue_tail_inc(tx_cq);
1635 return txcp;
1636 }
1637
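/* Reclaims one transmitted skb: starting at the header WRB at txq->tail it
 * unmaps every fragment up to last_index (the first data WRB as a single
 * mapping when the skb had linear data, the rest as pages), frees the skb
 * and returns the number of WRBs consumed so the caller can adjust
 * txq->used.
 */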
1638 static u16 be_tx_compl_process(struct be_adapter *adapter,
1639 struct be_tx_obj *txo, u16 last_index)
1640 {
1641 struct be_queue_info *txq = &txo->q;
1642 struct be_eth_wrb *wrb;
1643 struct sk_buff **sent_skbs = txo->sent_skb_list;
1644 struct sk_buff *sent_skb;
1645 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1646 bool unmap_skb_hdr = true;
1647
1648 sent_skb = sent_skbs[txq->tail];
1649 BUG_ON(!sent_skb);
1650 sent_skbs[txq->tail] = NULL;
1651
1652 /* skip header wrb */
1653 queue_tail_inc(txq);
1654
1655 do {
1656 cur_index = txq->tail;
1657 wrb = queue_tail_node(txq);
1658 unmap_tx_frag(&adapter->pdev->dev, wrb,
1659 (unmap_skb_hdr && skb_headlen(sent_skb)));
1660 unmap_skb_hdr = false;
1661
1662 num_wrbs++;
1663 queue_tail_inc(txq);
1664 } while (cur_index != last_index);
1665
1666 kfree_skb(sent_skb);
1667 return num_wrbs;
1668 }
1669
1670 /* Return the number of events in the event queue */
1671 static inline int events_get(struct be_eq_obj *eqo)
1672 {
1673 struct be_eq_entry *eqe;
1674 int num = 0;
1675
1676 do {
1677 eqe = queue_tail_node(&eqo->q);
1678 if (eqe->evt == 0)
1679 break;
1680
1681 rmb();
1682 eqe->evt = 0;
1683 num++;
1684 queue_tail_inc(&eqo->q);
1685 } while (true);
1686
1687 return num;
1688 }
1689
1690 /* Leaves the EQ in a disarmed state */
1691 static void be_eq_clean(struct be_eq_obj *eqo)
1692 {
1693 int num = events_get(eqo);
1694
1695 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1696 }
1697
1698 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1699 {
1700 struct be_rx_page_info *page_info;
1701 struct be_queue_info *rxq = &rxo->q;
1702 struct be_queue_info *rx_cq = &rxo->cq;
1703 struct be_rx_compl_info *rxcp;
1704 struct be_adapter *adapter = rxo->adapter;
1705 int flush_wait = 0;
1706 u16 tail;
1707
1708 /* Consume pending rx completions.
1709 * Wait for the flush completion (identified by zero num_rcvd)
1710 * to arrive. Notify CQ even when there are no more CQ entries
1711 * for HW to flush partially coalesced CQ entries.
1712 * In Lancer, there is no need to wait for flush compl.
1713 */
1714 for (;;) {
1715 rxcp = be_rx_compl_get(rxo);
1716 if (rxcp == NULL) {
1717 if (lancer_chip(adapter))
1718 break;
1719
1720 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1721 dev_warn(&adapter->pdev->dev,
1722 "did not receive flush compl\n");
1723 break;
1724 }
1725 be_cq_notify(adapter, rx_cq->id, true, 0);
1726 mdelay(1);
1727 } else {
1728 be_rx_compl_discard(rxo, rxcp);
1729 be_cq_notify(adapter, rx_cq->id, true, 1);
1730 if (rxcp->num_rcvd == 0)
1731 break;
1732 }
1733 }
1734
1735 /* After cleanup, leave the CQ in unarmed state */
1736 be_cq_notify(adapter, rx_cq->id, false, 0);
1737
1738 /* Then free posted rx buffers that were not used */
1739 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1740 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1741 page_info = get_rx_page_info(rxo, tail);
1742 put_page(page_info->page);
1743 memset(page_info, 0, sizeof(*page_info));
1744 }
1745 BUG_ON(atomic_read(&rxq->used));
1746 rxq->tail = rxq->head = 0;
1747 }
1748
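/* Drains all TX queues before teardown: completions are polled for up to
 * roughly 200 ms, and any WRBs still outstanding after that (for which no
 * completion will ever arrive) are unmapped and their skbs freed directly.
 */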
1749 static void be_tx_compl_clean(struct be_adapter *adapter)
1750 {
1751 struct be_tx_obj *txo;
1752 struct be_queue_info *txq;
1753 struct be_eth_tx_compl *txcp;
1754 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1755 struct sk_buff *sent_skb;
1756 bool dummy_wrb;
1757 int i, pending_txqs;
1758
1759 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1760 do {
1761 pending_txqs = adapter->num_tx_qs;
1762
1763 for_all_tx_queues(adapter, txo, i) {
1764 txq = &txo->q;
1765 while ((txcp = be_tx_compl_get(&txo->cq))) {
1766 end_idx =
1767 AMAP_GET_BITS(struct amap_eth_tx_compl,
1768 wrb_index, txcp);
1769 num_wrbs += be_tx_compl_process(adapter, txo,
1770 end_idx);
1771 cmpl++;
1772 }
1773 if (cmpl) {
1774 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1775 atomic_sub(num_wrbs, &txq->used);
1776 cmpl = 0;
1777 num_wrbs = 0;
1778 }
1779 if (atomic_read(&txq->used) == 0)
1780 pending_txqs--;
1781 }
1782
1783 if (pending_txqs == 0 || ++timeo > 200)
1784 break;
1785
1786 mdelay(1);
1787 } while (true);
1788
1789 for_all_tx_queues(adapter, txo, i) {
1790 txq = &txo->q;
1791 if (atomic_read(&txq->used))
1792 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1793 atomic_read(&txq->used));
1794
1795 /* free posted tx for which compls will never arrive */
1796 while (atomic_read(&txq->used)) {
1797 sent_skb = txo->sent_skb_list[txq->tail];
1798 end_idx = txq->tail;
1799 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1800 &dummy_wrb);
1801 index_adv(&end_idx, num_wrbs - 1, txq->len);
1802 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1803 atomic_sub(num_wrbs, &txq->used);
1804 }
1805 }
1806 }
1807
1808 static void be_evt_queues_destroy(struct be_adapter *adapter)
1809 {
1810 struct be_eq_obj *eqo;
1811 int i;
1812
1813 for_all_evt_queues(adapter, eqo, i) {
1814 if (eqo->q.created) {
1815 be_eq_clean(eqo);
1816 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1817 }
1818 be_queue_free(adapter, &eqo->q);
1819 }
1820 }
1821
1822 static int be_evt_queues_create(struct be_adapter *adapter)
1823 {
1824 struct be_queue_info *eq;
1825 struct be_eq_obj *eqo;
1826 int i, rc;
1827
1828 adapter->num_evt_qs = num_irqs(adapter);
1829
1830 for_all_evt_queues(adapter, eqo, i) {
1831 eqo->adapter = adapter;
1832 eqo->tx_budget = BE_TX_BUDGET;
1833 eqo->idx = i;
1834 eqo->max_eqd = BE_MAX_EQD;
1835 eqo->enable_aic = true;
1836
1837 eq = &eqo->q;
1838 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1839 sizeof(struct be_eq_entry));
1840 if (rc)
1841 return rc;
1842
1843 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1844 if (rc)
1845 return rc;
1846 }
1847 return 0;
1848 }
1849
1850 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1851 {
1852 struct be_queue_info *q;
1853
1854 q = &adapter->mcc_obj.q;
1855 if (q->created)
1856 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1857 be_queue_free(adapter, q);
1858
1859 q = &adapter->mcc_obj.cq;
1860 if (q->created)
1861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1862 be_queue_free(adapter, q);
1863 }
1864
1865 /* Must be called only after TX qs are created as MCC shares TX EQ */
1866 static int be_mcc_queues_create(struct be_adapter *adapter)
1867 {
1868 struct be_queue_info *q, *cq;
1869
1870 cq = &adapter->mcc_obj.cq;
1871 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1872 sizeof(struct be_mcc_compl)))
1873 goto err;
1874
1875 /* Use the default EQ for MCC completions */
1876 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1877 goto mcc_cq_free;
1878
1879 q = &adapter->mcc_obj.q;
1880 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1881 goto mcc_cq_destroy;
1882
1883 if (be_cmd_mccq_create(adapter, q, cq))
1884 goto mcc_q_free;
1885
1886 return 0;
1887
1888 mcc_q_free:
1889 be_queue_free(adapter, q);
1890 mcc_cq_destroy:
1891 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1892 mcc_cq_free:
1893 be_queue_free(adapter, cq);
1894 err:
1895 return -1;
1896 }
1897
1898 static void be_tx_queues_destroy(struct be_adapter *adapter)
1899 {
1900 struct be_queue_info *q;
1901 struct be_tx_obj *txo;
1902 u8 i;
1903
1904 for_all_tx_queues(adapter, txo, i) {
1905 q = &txo->q;
1906 if (q->created)
1907 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1908 be_queue_free(adapter, q);
1909
1910 q = &txo->cq;
1911 if (q->created)
1912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913 be_queue_free(adapter, q);
1914 }
1915 }
1916
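/* Use a single TX queue on BE2, in multi-channel mode, and on non-Lancer
 * chips when SR-IOV is wanted or the function is a VF; otherwise use the
 * FW-advertised maximum.
 */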
1917 static int be_num_txqs_want(struct be_adapter *adapter)
1918 {
1919 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1920 be_is_mc(adapter) ||
1921 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1922 BE2_chip(adapter))
1923 return 1;
1924 else
1925 return adapter->max_tx_queues;
1926 }
1927
1928 static int be_tx_cqs_create(struct be_adapter *adapter)
1929 {
1930 struct be_queue_info *cq, *eq;
1931 int status;
1932 struct be_tx_obj *txo;
1933 u8 i;
1934
1935 adapter->num_tx_qs = be_num_txqs_want(adapter);
1936 if (adapter->num_tx_qs != MAX_TX_QS) {
1937 rtnl_lock();
1938 netif_set_real_num_tx_queues(adapter->netdev,
1939 adapter->num_tx_qs);
1940 rtnl_unlock();
1941 }
1942
1943 for_all_tx_queues(adapter, txo, i) {
1944 cq = &txo->cq;
1945 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1946 sizeof(struct be_eth_tx_compl));
1947 if (status)
1948 return status;
1949
1950 /* If num_evt_qs is less than num_tx_qs, then more than
1951		 * one txq shares an eq
1952 */
1953 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1954 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1955 if (status)
1956 return status;
1957 }
1958 return 0;
1959 }
1960
1961 static int be_tx_qs_create(struct be_adapter *adapter)
1962 {
1963 struct be_tx_obj *txo;
1964 int i, status;
1965
1966 for_all_tx_queues(adapter, txo, i) {
1967 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1968 sizeof(struct be_eth_wrb));
1969 if (status)
1970 return status;
1971
1972 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1973 if (status)
1974 return status;
1975 }
1976
1977 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1978 adapter->num_tx_qs);
1979 return 0;
1980 }
1981
1982 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1983 {
1984 struct be_queue_info *q;
1985 struct be_rx_obj *rxo;
1986 int i;
1987
1988 for_all_rx_queues(adapter, rxo, i) {
1989 q = &rxo->cq;
1990 if (q->created)
1991 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1992 be_queue_free(adapter, q);
1993 }
1994 }
1995
1996 static int be_rx_cqs_create(struct be_adapter *adapter)
1997 {
1998 struct be_queue_info *eq, *cq;
1999 struct be_rx_obj *rxo;
2000 int rc, i;
2001
2002 /* We'll create as many RSS rings as there are irqs.
2003 * But when there's only one irq there's no use creating RSS rings
2004 */
2005 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2006 num_irqs(adapter) + 1 : 1;
2007 if (adapter->num_rx_qs != MAX_RX_QS) {
2008 rtnl_lock();
2009 netif_set_real_num_rx_queues(adapter->netdev,
2010 adapter->num_rx_qs);
2011 rtnl_unlock();
2012 }
2013
2014 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2015 for_all_rx_queues(adapter, rxo, i) {
2016 rxo->adapter = adapter;
2017 cq = &rxo->cq;
2018 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2019 sizeof(struct be_eth_rx_compl));
2020 if (rc)
2021 return rc;
2022
2023 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2024 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2025 if (rc)
2026 return rc;
2027 }
2028
2029 dev_info(&adapter->pdev->dev,
2030 "created %d RSS queue(s) and 1 default RX queue\n",
2031 adapter->num_rx_qs - 1);
2032 return 0;
2033 }
2034
2035 static irqreturn_t be_intx(int irq, void *dev)
2036 {
2037 struct be_eq_obj *eqo = dev;
2038 struct be_adapter *adapter = eqo->adapter;
2039 int num_evts = 0;
2040
2041 /* IRQ is not expected when NAPI is scheduled as the EQ
2042 * will not be armed.
2043 * But, this can happen on Lancer INTx where it takes
2044	 * a while to de-assert INTx or in BE2 where occasionally
2045 * an interrupt may be raised even when EQ is unarmed.
2046 * If NAPI is already scheduled, then counting & notifying
2047 * events will orphan them.
2048 */
2049 if (napi_schedule_prep(&eqo->napi)) {
2050 num_evts = events_get(eqo);
2051 __napi_schedule(&eqo->napi);
2052 if (num_evts)
2053 eqo->spurious_intr = 0;
2054 }
2055 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2056
2057	/* Return IRQ_HANDLED only for the first spurious intr
2058 * after a valid intr to stop the kernel from branding
2059 * this irq as a bad one!
2060 */
2061 if (num_evts || eqo->spurious_intr++ == 0)
2062 return IRQ_HANDLED;
2063 else
2064 return IRQ_NONE;
2065 }
2066
2067 static irqreturn_t be_msix(int irq, void *dev)
2068 {
2069 struct be_eq_obj *eqo = dev;
2070
2071 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2072 napi_schedule(&eqo->napi);
2073 return IRQ_HANDLED;
2074 }
2075
2076 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2077 {
2078	return rxcp->tcpf && !rxcp->err;
2079 }
2080
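/* NAPI RX worker: consume up to 'budget' completions, handing packets to
 * GRO or the regular receive path, then re-arm the CQ and replenish RX
 * fragments if the queue has dropped below the refill watermark.
 */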
2081 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2082 int budget)
2083 {
2084 struct be_adapter *adapter = rxo->adapter;
2085 struct be_queue_info *rx_cq = &rxo->cq;
2086 struct be_rx_compl_info *rxcp;
2087 u32 work_done;
2088
2089 for (work_done = 0; work_done < budget; work_done++) {
2090 rxcp = be_rx_compl_get(rxo);
2091 if (!rxcp)
2092 break;
2093
2094 /* Is it a flush compl that has no data */
2095 if (unlikely(rxcp->num_rcvd == 0))
2096 goto loop_continue;
2097
2098 /* Discard compl with partial DMA Lancer B0 */
2099 if (unlikely(!rxcp->pkt_size)) {
2100 be_rx_compl_discard(rxo, rxcp);
2101 goto loop_continue;
2102 }
2103
2104 /* On BE drop pkts that arrive due to imperfect filtering in
2105		 * promiscuous mode on some SKUs
2106 */
2107 if (unlikely(rxcp->port != adapter->port_num &&
2108 !lancer_chip(adapter))) {
2109 be_rx_compl_discard(rxo, rxcp);
2110 goto loop_continue;
2111 }
2112
2113 if (do_gro(rxcp))
2114 be_rx_compl_process_gro(rxo, napi, rxcp);
2115 else
2116 be_rx_compl_process(rxo, rxcp);
2117 loop_continue:
2118 be_rx_stats_update(rxo, rxcp);
2119 }
2120
2121 if (work_done) {
2122 be_cq_notify(adapter, rx_cq->id, true, work_done);
2123
2124 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2125 be_post_rx_frags(rxo, GFP_ATOMIC);
2126 }
2127
2128 return work_done;
2129 }
2130
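/* Reap up to 'budget' TX completions and wake the netdev subqueue once at
 * least half of the TX ring is free again; returns true when the CQ was
 * fully drained within the budget.
 */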
2131 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2132 int budget, int idx)
2133 {
2134 struct be_eth_tx_compl *txcp;
2135 int num_wrbs = 0, work_done;
2136
2137 for (work_done = 0; work_done < budget; work_done++) {
2138 txcp = be_tx_compl_get(&txo->cq);
2139 if (!txcp)
2140 break;
2141 num_wrbs += be_tx_compl_process(adapter, txo,
2142 AMAP_GET_BITS(struct amap_eth_tx_compl,
2143 wrb_index, txcp));
2144 }
2145
2146 if (work_done) {
2147 be_cq_notify(adapter, txo->cq.id, true, work_done);
2148 atomic_sub(num_wrbs, &txo->q.used);
2149
2150 /* As Tx wrbs have been freed up, wake up netdev queue
2151 * if it was stopped due to lack of tx wrbs. */
2152 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2153 atomic_read(&txo->q.used) < txo->q.len / 2) {
2154 netif_wake_subqueue(adapter->netdev, idx);
2155 }
2156
2157 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2158 tx_stats(txo)->tx_compl += work_done;
2159 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2160 }
2161 return (work_done < budget); /* Done */
2162 }
2163
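/* NAPI poll handler: service every TX and RX queue mapped to this EQ (and
 * the MCC queue for the MCC EQ); the EQ is re-armed only when the budget
 * was not exhausted.
 */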
2164 int be_poll(struct napi_struct *napi, int budget)
2165 {
2166 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2167 struct be_adapter *adapter = eqo->adapter;
2168 int max_work = 0, work, i, num_evts;
2169 bool tx_done;
2170
2171 num_evts = events_get(eqo);
2172
2173 /* Process all TXQs serviced by this EQ */
2174 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2175 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2176 eqo->tx_budget, i);
2177 if (!tx_done)
2178 max_work = budget;
2179 }
2180
2181 /* This loop will iterate twice for EQ0 in which
2182	 * completions of the last RXQ (default one) are also processed.
2183	 * For other EQs the loop iterates only once.
2184 */
2185 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2186 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2187 max_work = max(work, max_work);
2188 }
2189
2190 if (is_mcc_eqo(eqo))
2191 be_process_mcc(adapter);
2192
2193 if (max_work < budget) {
2194 napi_complete(napi);
2195 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2196 } else {
2197 /* As we'll continue in polling mode, count and clear events */
2198 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2199 }
2200 return max_work;
2201 }
2202
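/* Look for unrecoverable errors: SLIPORT status registers on Lancer,
 * unmasked UE status bits in PCI config space on other chips. hw_error is
 * set only for SLIPORT errors; UE bits are logged but not treated as fatal.
 */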
2203 void be_detect_error(struct be_adapter *adapter)
2204 {
2205 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2206 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2207 u32 i;
2208
2209 if (be_hw_error(adapter))
2210 return;
2211
2212 if (lancer_chip(adapter)) {
2213 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2214 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2215 sliport_err1 = ioread32(adapter->db +
2216 SLIPORT_ERROR1_OFFSET);
2217 sliport_err2 = ioread32(adapter->db +
2218 SLIPORT_ERROR2_OFFSET);
2219 }
2220 } else {
2221 pci_read_config_dword(adapter->pdev,
2222 PCICFG_UE_STATUS_LOW, &ue_lo);
2223 pci_read_config_dword(adapter->pdev,
2224 PCICFG_UE_STATUS_HIGH, &ue_hi);
2225 pci_read_config_dword(adapter->pdev,
2226 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2227 pci_read_config_dword(adapter->pdev,
2228 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2229
2230 ue_lo = (ue_lo & ~ue_lo_mask);
2231 ue_hi = (ue_hi & ~ue_hi_mask);
2232 }
2233
2234	/* On certain platforms BE hardware can indicate spurious UEs.
2235	 * In case of a real UE the h/w stops working on its own anyway,
2236	 * so hw_error is not set on UE detection.
2237	 */
2238 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2239 adapter->hw_error = true;
2240 dev_err(&adapter->pdev->dev,
2241 "Error detected in the card\n");
2242 }
2243
2244 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2245 dev_err(&adapter->pdev->dev,
2246 "ERR: sliport status 0x%x\n", sliport_status);
2247 dev_err(&adapter->pdev->dev,
2248 "ERR: sliport error1 0x%x\n", sliport_err1);
2249 dev_err(&adapter->pdev->dev,
2250 "ERR: sliport error2 0x%x\n", sliport_err2);
2251 }
2252
2253 if (ue_lo) {
2254 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2255 if (ue_lo & 1)
2256 dev_err(&adapter->pdev->dev,
2257 "UE: %s bit set\n", ue_status_low_desc[i]);
2258 }
2259 }
2260
2261 if (ue_hi) {
2262 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2263 if (ue_hi & 1)
2264 dev_err(&adapter->pdev->dev,
2265 "UE: %s bit set\n", ue_status_hi_desc[i]);
2266 }
2267 }
2268
2269 }
2270
2271 static void be_msix_disable(struct be_adapter *adapter)
2272 {
2273 if (msix_enabled(adapter)) {
2274 pci_disable_msix(adapter->pdev);
2275 adapter->num_msix_vec = 0;
2276 }
2277 }
2278
2279 static uint be_num_rss_want(struct be_adapter *adapter)
2280 {
2281 u32 num = 0;
2282
2283 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2284 (lancer_chip(adapter) ||
2285 (!sriov_want(adapter) && be_physfn(adapter)))) {
2286 num = adapter->max_rss_queues;
2287 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2288 }
2289 return num;
2290 }
2291
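/* Request one MSI-x vector per desired RSS queue (capped by the number of
 * online CPUs), plus RoCE vectors when supported; if the full request
 * fails, retry with the vector count the PCI core reports as available and
 * then split the vectors between NIC and RoCE use.
 */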
2292 static void be_msix_enable(struct be_adapter *adapter)
2293 {
2294 #define BE_MIN_MSIX_VECTORS 1
2295 int i, status, num_vec, num_roce_vec = 0;
2296 struct device *dev = &adapter->pdev->dev;
2297
2298 /* If RSS queues are not used, need a vec for default RX Q */
2299 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2300 if (be_roce_supported(adapter)) {
2301 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2302 (num_online_cpus() + 1));
2303 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2304 num_vec += num_roce_vec;
2305 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2306 }
2307 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2308
2309 for (i = 0; i < num_vec; i++)
2310 adapter->msix_entries[i].entry = i;
2311
2312 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2313 if (status == 0) {
2314 goto done;
2315 } else if (status >= BE_MIN_MSIX_VECTORS) {
2316 num_vec = status;
2317 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2318 num_vec) == 0)
2319 goto done;
2320 }
2321
2322 dev_warn(dev, "MSIx enable failed\n");
2323 return;
2324 done:
2325 if (be_roce_supported(adapter)) {
2326 if (num_vec > num_roce_vec) {
2327 adapter->num_msix_vec = num_vec - num_roce_vec;
2328 adapter->num_msix_roce_vec =
2329 num_vec - adapter->num_msix_vec;
2330 } else {
2331 adapter->num_msix_vec = num_vec;
2332 adapter->num_msix_roce_vec = 0;
2333 }
2334 } else
2335 adapter->num_msix_vec = num_vec;
2336 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2337 return;
2338 }
2339
2340 static inline int be_msix_vec_get(struct be_adapter *adapter,
2341 struct be_eq_obj *eqo)
2342 {
2343 return adapter->msix_entries[eqo->idx].vector;
2344 }
2345
2346 static int be_msix_register(struct be_adapter *adapter)
2347 {
2348 struct net_device *netdev = adapter->netdev;
2349 struct be_eq_obj *eqo;
2350 int status, i, vec;
2351
2352 for_all_evt_queues(adapter, eqo, i) {
2353 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2354 vec = be_msix_vec_get(adapter, eqo);
2355 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2356 if (status)
2357 goto err_msix;
2358 }
2359
2360 return 0;
2361 err_msix:
2362 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2363 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2364 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2365 status);
2366 be_msix_disable(adapter);
2367 return status;
2368 }
2369
2370 static int be_irq_register(struct be_adapter *adapter)
2371 {
2372 struct net_device *netdev = adapter->netdev;
2373 int status;
2374
2375 if (msix_enabled(adapter)) {
2376 status = be_msix_register(adapter);
2377 if (status == 0)
2378 goto done;
2379 /* INTx is not supported for VF */
2380 if (!be_physfn(adapter))
2381 return status;
2382 }
2383
2384 /* INTx: only the first EQ is used */
2385 netdev->irq = adapter->pdev->irq;
2386 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2387 &adapter->eq_obj[0]);
2388 if (status) {
2389 dev_err(&adapter->pdev->dev,
2390 "INTx request IRQ failed - err %d\n", status);
2391 return status;
2392 }
2393 done:
2394 adapter->isr_registered = true;
2395 return 0;
2396 }
2397
2398 static void be_irq_unregister(struct be_adapter *adapter)
2399 {
2400 struct net_device *netdev = adapter->netdev;
2401 struct be_eq_obj *eqo;
2402 int i;
2403
2404 if (!adapter->isr_registered)
2405 return;
2406
2407 /* INTx */
2408 if (!msix_enabled(adapter)) {
2409 free_irq(netdev->irq, &adapter->eq_obj[0]);
2410 goto done;
2411 }
2412
2413 /* MSIx */
2414 for_all_evt_queues(adapter, eqo, i)
2415 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2416
2417 done:
2418 adapter->isr_registered = false;
2419 }
2420
2421 static void be_rx_qs_destroy(struct be_adapter *adapter)
2422 {
2423 struct be_queue_info *q;
2424 struct be_rx_obj *rxo;
2425 int i;
2426
2427 for_all_rx_queues(adapter, rxo, i) {
2428 q = &rxo->q;
2429 if (q->created) {
2430 be_cmd_rxq_destroy(adapter, q);
2431 /* After the rxq is invalidated, wait for a grace time
2432 * of 1ms for all dma to end and the flush compl to
2433 * arrive
2434 */
2435 mdelay(1);
2436 be_rx_cq_clean(rxo);
2437 }
2438 be_queue_free(adapter, q);
2439 }
2440 }
2441
2442 static int be_close(struct net_device *netdev)
2443 {
2444 struct be_adapter *adapter = netdev_priv(netdev);
2445 struct be_eq_obj *eqo;
2446 int i;
2447
2448 be_roce_dev_close(adapter);
2449
2450 for_all_evt_queues(adapter, eqo, i)
2451 napi_disable(&eqo->napi);
2452
2453 be_async_mcc_disable(adapter);
2454
2455 /* Wait for all pending tx completions to arrive so that
2456 * all tx skbs are freed.
2457 */
2458 be_tx_compl_clean(adapter);
2459
2460 be_rx_qs_destroy(adapter);
2461
2462 for_all_evt_queues(adapter, eqo, i) {
2463 if (msix_enabled(adapter))
2464 synchronize_irq(be_msix_vec_get(adapter, eqo));
2465 else
2466 synchronize_irq(netdev->irq);
2467 be_eq_clean(eqo);
2468 }
2469
2470 be_irq_unregister(adapter);
2471
2472 return 0;
2473 }
2474
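/* Create the RX rings: the default (non-RSS) ring first, as the FW expects,
 * then the RSS rings; program the 128-entry RSS indirection table and post
 * the initial receive fragments.
 */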
2475 static int be_rx_qs_create(struct be_adapter *adapter)
2476 {
2477 struct be_rx_obj *rxo;
2478 int rc, i, j;
2479 u8 rsstable[128];
2480
2481 for_all_rx_queues(adapter, rxo, i) {
2482 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2483 sizeof(struct be_eth_rx_d));
2484 if (rc)
2485 return rc;
2486 }
2487
2488 /* The FW would like the default RXQ to be created first */
2489 rxo = default_rxo(adapter);
2490 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2491 adapter->if_handle, false, &rxo->rss_id);
2492 if (rc)
2493 return rc;
2494
2495 for_all_rss_queues(adapter, rxo, i) {
2496 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2497 rx_frag_size, adapter->if_handle,
2498 true, &rxo->rss_id);
2499 if (rc)
2500 return rc;
2501 }
2502
2503 if (be_multi_rxq(adapter)) {
2504 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2505 for_all_rss_queues(adapter, rxo, i) {
2506 if ((j + i) >= 128)
2507 break;
2508 rsstable[j + i] = rxo->rss_id;
2509 }
2510 }
2511 rc = be_cmd_rss_config(adapter, rsstable, 128);
2512 if (rc)
2513 return rc;
2514 }
2515
2516 /* First time posting */
2517 for_all_rx_queues(adapter, rxo, i)
2518 be_post_rx_frags(rxo, GFP_KERNEL);
2519 return 0;
2520 }
2521
2522 static int be_open(struct net_device *netdev)
2523 {
2524 struct be_adapter *adapter = netdev_priv(netdev);
2525 struct be_eq_obj *eqo;
2526 struct be_rx_obj *rxo;
2527 struct be_tx_obj *txo;
2528 u8 link_status;
2529 int status, i;
2530
2531 status = be_rx_qs_create(adapter);
2532 if (status)
2533 goto err;
2534
2535 be_irq_register(adapter);
2536
2537 for_all_rx_queues(adapter, rxo, i)
2538 be_cq_notify(adapter, rxo->cq.id, true, 0);
2539
2540 for_all_tx_queues(adapter, txo, i)
2541 be_cq_notify(adapter, txo->cq.id, true, 0);
2542
2543 be_async_mcc_enable(adapter);
2544
2545 for_all_evt_queues(adapter, eqo, i) {
2546 napi_enable(&eqo->napi);
2547 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2548 }
2549
2550 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2551 if (!status)
2552 be_link_status_update(adapter, link_status);
2553
2554 be_roce_dev_open(adapter);
2555 return 0;
2556 err:
2557 be_close(adapter->netdev);
2558 return -EIO;
2559 }
2560
2561 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2562 {
2563 struct be_dma_mem cmd;
2564 int status = 0;
2565 u8 mac[ETH_ALEN];
2566
2567 memset(mac, 0, ETH_ALEN);
2568
2569 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2570 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2571 GFP_KERNEL | __GFP_ZERO);
2572 if (cmd.va == NULL)
2573 return -1;
2574
2575 if (enable) {
2576 status = pci_write_config_dword(adapter->pdev,
2577 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2578 if (status) {
2579 dev_err(&adapter->pdev->dev,
2580				"Could not enable Wake-on-LAN\n");
2581 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2582 cmd.dma);
2583 return status;
2584 }
2585 status = be_cmd_enable_magic_wol(adapter,
2586 adapter->netdev->dev_addr, &cmd);
2587 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2588 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2589 } else {
2590 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2591 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2592 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2593 }
2594
2595 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2596 return status;
2597 }
2598
2599 /*
2600 * Generate a seed MAC address from the PF MAC Address using jhash.
2601 * MAC addresses for the VFs are assigned incrementally starting from the seed.
2602 * These addresses are programmed in the ASIC by the PF and the VF driver
2603 * queries for the MAC address during its probe.
2604 */
2605 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2606 {
2607 u32 vf;
2608 int status = 0;
2609 u8 mac[ETH_ALEN];
2610 struct be_vf_cfg *vf_cfg;
2611
2612 be_vf_eth_addr_generate(adapter, mac);
2613
2614 for_all_vfs(adapter, vf_cfg, vf) {
2615 if (lancer_chip(adapter)) {
2616 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2617 } else {
2618 status = be_cmd_pmac_add(adapter, mac,
2619 vf_cfg->if_handle,
2620 &vf_cfg->pmac_id, vf + 1);
2621 }
2622
2623 if (status)
2624 dev_err(&adapter->pdev->dev,
2625			"MAC address assignment failed for VF %d\n", vf);
2626 else
2627 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2628
2629 mac[5] += 1;
2630 }
2631 return status;
2632 }
2633
2634 static int be_vfs_mac_query(struct be_adapter *adapter)
2635 {
2636 int status, vf;
2637 u8 mac[ETH_ALEN];
2638 struct be_vf_cfg *vf_cfg;
2639 bool active;
2640
2641 for_all_vfs(adapter, vf_cfg, vf) {
2642 be_cmd_get_mac_from_list(adapter, mac, &active,
2643 &vf_cfg->pmac_id, 0);
2644
2645 status = be_cmd_mac_addr_query(adapter, mac, false,
2646 vf_cfg->if_handle, 0);
2647 if (status)
2648 return status;
2649 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2650 }
2651 return 0;
2652 }
2653
2654 static void be_vf_clear(struct be_adapter *adapter)
2655 {
2656 struct be_vf_cfg *vf_cfg;
2657 u32 vf;
2658
2659 if (be_find_vfs(adapter, ASSIGNED)) {
2660 dev_warn(&adapter->pdev->dev,
2661 "VFs are assigned to VMs: not disabling VFs\n");
2662 goto done;
2663 }
2664
2665 for_all_vfs(adapter, vf_cfg, vf) {
2666 if (lancer_chip(adapter))
2667 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2668 else
2669 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2670 vf_cfg->pmac_id, vf + 1);
2671
2672 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2673 }
2674 pci_disable_sriov(adapter->pdev);
2675 done:
2676 kfree(adapter->vf_cfg);
2677 adapter->num_vfs = 0;
2678 }
2679
2680 static int be_clear(struct be_adapter *adapter)
2681 {
2682 int i = 1;
2683
2684 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2685 cancel_delayed_work_sync(&adapter->work);
2686 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2687 }
2688
2689 if (sriov_enabled(adapter))
2690 be_vf_clear(adapter);
2691
2692 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2693 be_cmd_pmac_del(adapter, adapter->if_handle,
2694 adapter->pmac_id[i], 0);
2695
2696 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2697
2698 be_mcc_queues_destroy(adapter);
2699 be_rx_cqs_destroy(adapter);
2700 be_tx_queues_destroy(adapter);
2701 be_evt_queues_destroy(adapter);
2702
2703 kfree(adapter->pmac_id);
2704 adapter->pmac_id = NULL;
2705
2706 be_msix_disable(adapter);
2707 return 0;
2708 }
2709
2710 static int be_vfs_if_create(struct be_adapter *adapter)
2711 {
2712 struct be_vf_cfg *vf_cfg;
2713 u32 cap_flags, en_flags, vf;
2714 int status;
2715
2716 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2717 BE_IF_FLAGS_MULTICAST;
2718
2719 for_all_vfs(adapter, vf_cfg, vf) {
2720 if (!BE3_chip(adapter))
2721 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2722
2723 /* If a FW profile exists, then cap_flags are updated */
2724 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2725 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2726 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2727 &vf_cfg->if_handle, vf + 1);
2728 if (status)
2729 goto err;
2730 }
2731 err:
2732 return status;
2733 }
2734
2735 static int be_vf_setup_init(struct be_adapter *adapter)
2736 {
2737 struct be_vf_cfg *vf_cfg;
2738 int vf;
2739
2740 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2741 GFP_KERNEL);
2742 if (!adapter->vf_cfg)
2743 return -ENOMEM;
2744
2745 for_all_vfs(adapter, vf_cfg, vf) {
2746 vf_cfg->if_handle = -1;
2747 vf_cfg->pmac_id = -1;
2748 }
2749 return 0;
2750 }
2751
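/* Configure SR-IOV: reuse VFs that are already enabled, or enable new ones
 * capped at the device limit; create (or query) per-VF interfaces and MAC
 * addresses, lift the BE3 TX-rate cap, and record each VF's link speed and
 * default VLAN before enabling it.
 */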
2752 static int be_vf_setup(struct be_adapter *adapter)
2753 {
2754 struct be_vf_cfg *vf_cfg;
2755 u16 def_vlan, lnk_speed;
2756 int status, old_vfs, vf;
2757 struct device *dev = &adapter->pdev->dev;
2758
2759 old_vfs = be_find_vfs(adapter, ENABLED);
2760 if (old_vfs) {
2761 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2762 if (old_vfs != num_vfs)
2763 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2764 adapter->num_vfs = old_vfs;
2765 } else {
2766 if (num_vfs > adapter->dev_num_vfs)
2767 dev_info(dev, "Device supports %d VFs and not %d\n",
2768 adapter->dev_num_vfs, num_vfs);
2769 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2770
2771 status = pci_enable_sriov(adapter->pdev, num_vfs);
2772 if (status) {
2773 dev_err(dev, "SRIOV enable failed\n");
2774 adapter->num_vfs = 0;
2775 return 0;
2776 }
2777 }
2778
2779 status = be_vf_setup_init(adapter);
2780 if (status)
2781 goto err;
2782
2783 if (old_vfs) {
2784 for_all_vfs(adapter, vf_cfg, vf) {
2785 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2786 if (status)
2787 goto err;
2788 }
2789 } else {
2790 status = be_vfs_if_create(adapter);
2791 if (status)
2792 goto err;
2793 }
2794
2795 if (old_vfs) {
2796 status = be_vfs_mac_query(adapter);
2797 if (status)
2798 goto err;
2799 } else {
2800 status = be_vf_eth_addr_config(adapter);
2801 if (status)
2802 goto err;
2803 }
2804
2805 for_all_vfs(adapter, vf_cfg, vf) {
2806		/* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
2807 * Allow full available bandwidth
2808 */
2809 if (BE3_chip(adapter) && !old_vfs)
2810 be_cmd_set_qos(adapter, 1000, vf+1);
2811
2812 status = be_cmd_link_status_query(adapter, &lnk_speed,
2813 NULL, vf + 1);
2814 if (!status)
2815 vf_cfg->tx_rate = lnk_speed;
2816
2817 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2818 vf + 1, vf_cfg->if_handle);
2819 if (status)
2820 goto err;
2821 vf_cfg->def_vid = def_vlan;
2822
2823 be_cmd_enable_vf(adapter, vf + 1);
2824 }
2825 return 0;
2826 err:
2827 dev_err(dev, "VF setup failed\n");
2828 be_vf_clear(adapter);
2829 return status;
2830 }
2831
2832 static void be_setup_init(struct be_adapter *adapter)
2833 {
2834 adapter->vlan_prio_bmap = 0xff;
2835 adapter->phy.link_speed = -1;
2836 adapter->if_handle = -1;
2837 adapter->be3_native = false;
2838 adapter->promiscuous = false;
2839 if (be_physfn(adapter))
2840 adapter->cmd_privileges = MAX_PRIVILEGES;
2841 else
2842 adapter->cmd_privileges = MIN_PRIVILEGES;
2843 }
2844
2845 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2846 bool *active_mac, u32 *pmac_id)
2847 {
2848 int status = 0;
2849
2850 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2851 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2852 if (!lancer_chip(adapter) && !be_physfn(adapter))
2853 *active_mac = true;
2854 else
2855 *active_mac = false;
2856
2857 return status;
2858 }
2859
2860 if (lancer_chip(adapter)) {
2861 status = be_cmd_get_mac_from_list(adapter, mac,
2862 active_mac, pmac_id, 0);
2863 if (*active_mac) {
2864 status = be_cmd_mac_addr_query(adapter, mac, false,
2865 if_handle, *pmac_id);
2866 }
2867 } else if (be_physfn(adapter)) {
2868 /* For BE3, for PF get permanent MAC */
2869 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2870 *active_mac = false;
2871 } else {
2872		/* For BE3, for VF get soft MAC assigned by PF */
2873 status = be_cmd_mac_addr_query(adapter, mac, false,
2874 if_handle, 0);
2875 *active_mac = true;
2876 }
2877 return status;
2878 }
2879
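/* Determine per-function resource limits: from the FW profile on
 * non-BEx chips (with sanity caps), or from fixed defaults on BE2/BE3;
 * also read the SR-IOV TotalVFs value from PCI config space.
 */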
2880 static void be_get_resources(struct be_adapter *adapter)
2881 {
2882 u16 dev_num_vfs;
2883 int pos, status;
2884 bool profile_present = false;
2885
2886 if (!BEx_chip(adapter)) {
2887 status = be_cmd_get_func_config(adapter);
2888 if (!status)
2889 profile_present = true;
2890 }
2891
2892 if (profile_present) {
2893 /* Sanity fixes for Lancer */
2894 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2895 BE_UC_PMAC_COUNT);
2896 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2897 BE_NUM_VLANS_SUPPORTED);
2898 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2899 BE_MAX_MC);
2900 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2901 MAX_TX_QS);
2902 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2903 BE3_MAX_RSS_QS);
2904 adapter->max_event_queues = min_t(u16,
2905 adapter->max_event_queues,
2906 BE3_MAX_RSS_QS);
2907
2908 if (adapter->max_rss_queues &&
2909 adapter->max_rss_queues == adapter->max_rx_queues)
2910 adapter->max_rss_queues -= 1;
2911
2912 if (adapter->max_event_queues < adapter->max_rss_queues)
2913 adapter->max_rss_queues = adapter->max_event_queues;
2914
2915 } else {
2916 if (be_physfn(adapter))
2917 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2918 else
2919 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2920
2921 if (adapter->function_mode & FLEX10_MODE)
2922 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2923 else
2924 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2925
2926 adapter->max_mcast_mac = BE_MAX_MC;
2927 adapter->max_tx_queues = MAX_TX_QS;
2928 adapter->max_rss_queues = (adapter->be3_native) ?
2929 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2930 adapter->max_event_queues = BE3_MAX_RSS_QS;
2931
2932 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2933 BE_IF_FLAGS_BROADCAST |
2934 BE_IF_FLAGS_MULTICAST |
2935 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2936 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2937 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2938 BE_IF_FLAGS_PROMISCUOUS;
2939
2940 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2941 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2942 }
2943
2944 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2945 if (pos) {
2946 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2947 &dev_num_vfs);
2948 if (BE3_chip(adapter))
2949 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2950 adapter->dev_num_vfs = dev_num_vfs;
2951 }
2952 }
2953
2954 /* Routine to query per function resource limits */
2955 static int be_get_config(struct be_adapter *adapter)
2956 {
2957 int status;
2958
2959 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2960 &adapter->function_mode,
2961 &adapter->function_caps);
2962 if (status)
2963 goto err;
2964
2965 be_get_resources(adapter);
2966
2967 /* primary mac needs 1 pmac entry */
2968 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2969 sizeof(u32), GFP_KERNEL);
2970 if (!adapter->pmac_id) {
2971 status = -ENOMEM;
2972 goto err;
2973 }
2974
2975 err:
2976 return status;
2977 }
2978
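/* Bring the function up: query resources, enable MSI-x, create the event,
 * TX/RX completion and MCC queues, create the interface and program its
 * MAC address, then create the TX rings, apply VLAN/RX-mode/flow-control
 * settings, optionally set up SR-IOV and start the worker task.
 */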
2979 static int be_setup(struct be_adapter *adapter)
2980 {
2981 struct device *dev = &adapter->pdev->dev;
2982 u32 en_flags;
2983 u32 tx_fc, rx_fc;
2984 int status;
2985 u8 mac[ETH_ALEN];
2986 bool active_mac;
2987
2988 be_setup_init(adapter);
2989
2990 if (!lancer_chip(adapter))
2991 be_cmd_req_native_mode(adapter);
2992
2993 status = be_get_config(adapter);
2994 if (status)
2995 goto err;
2996
2997 be_msix_enable(adapter);
2998
2999 status = be_evt_queues_create(adapter);
3000 if (status)
3001 goto err;
3002
3003 status = be_tx_cqs_create(adapter);
3004 if (status)
3005 goto err;
3006
3007 status = be_rx_cqs_create(adapter);
3008 if (status)
3009 goto err;
3010
3011 status = be_mcc_queues_create(adapter);
3012 if (status)
3013 goto err;
3014
3015 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3016	/* In UMC mode the FW does not return the right privileges.
3017	 * Override with privileges equivalent to the PF's.
3018 */
3019 if (be_is_mc(adapter))
3020 adapter->cmd_privileges = MAX_PRIVILEGES;
3021
3022 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3023 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3024
3025 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3026 en_flags |= BE_IF_FLAGS_RSS;
3027
3028 en_flags = en_flags & adapter->if_cap_flags;
3029
3030 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3031 &adapter->if_handle, 0);
3032 if (status != 0)
3033 goto err;
3034
3035 memset(mac, 0, ETH_ALEN);
3036 active_mac = false;
3037 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3038 &active_mac, &adapter->pmac_id[0]);
3039 if (status != 0)
3040 goto err;
3041
3042 if (!active_mac) {
3043 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3044 &adapter->pmac_id[0], 0);
3045 if (status != 0)
3046 goto err;
3047 }
3048
3049 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3050 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3051 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3052 }
3053
3054 status = be_tx_qs_create(adapter);
3055 if (status)
3056 goto err;
3057
3058 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3059
3060 if (adapter->vlans_added)
3061 be_vid_config(adapter);
3062
3063 be_set_rx_mode(adapter->netdev);
3064
3065 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3066
3067 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3068 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3069 adapter->rx_fc);
3070
3071 if (be_physfn(adapter) && num_vfs) {
3072 if (adapter->dev_num_vfs)
3073 be_vf_setup(adapter);
3074 else
3075 dev_warn(dev, "device doesn't support SRIOV\n");
3076 }
3077
3078 status = be_cmd_get_phy_info(adapter);
3079 if (!status && be_pause_supported(adapter))
3080 adapter->phy.fc_autoneg = 1;
3081
3082 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3083 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3084 return 0;
3085 err:
3086 be_clear(adapter);
3087 return status;
3088 }
3089
3090 #ifdef CONFIG_NET_POLL_CONTROLLER
3091 static void be_netpoll(struct net_device *netdev)
3092 {
3093 struct be_adapter *adapter = netdev_priv(netdev);
3094 struct be_eq_obj *eqo;
3095 int i;
3096
3097 for_all_evt_queues(adapter, eqo, i) {
3098 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3099 napi_schedule(&eqo->napi);
3100 }
3101
3102 return;
3103 }
3104 #endif
3105
3106 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3107 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3108
3109 static bool be_flash_redboot(struct be_adapter *adapter,
3110 const u8 *p, u32 img_start, int image_size,
3111 int hdr_size)
3112 {
3113 u32 crc_offset;
3114 u8 flashed_crc[4];
3115 int status;
3116
3117 crc_offset = hdr_size + img_start + image_size - 4;
3118
3119 p += crc_offset;
3120
3121 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3122 (image_size - 4));
3123 if (status) {
3124 dev_err(&adapter->pdev->dev,
3125 "could not get crc from flash, not flashing redboot\n");
3126 return false;
3127 }
3128
3129	/* update redboot only if the CRC does not match */
3130 if (!memcmp(flashed_crc, p, 4))
3131 return false;
3132 else
3133 return true;
3134 }
3135
3136 static bool phy_flashing_required(struct be_adapter *adapter)
3137 {
3138 return (adapter->phy.phy_type == TN_8022 &&
3139 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3140 }
3141
3142 static bool is_comp_in_ufi(struct be_adapter *adapter,
3143 struct flash_section_info *fsec, int type)
3144 {
3145 int i = 0, img_type = 0;
3146 struct flash_section_info_g2 *fsec_g2 = NULL;
3147
3148 if (BE2_chip(adapter))
3149 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3150
3151 for (i = 0; i < MAX_FLASH_COMP; i++) {
3152 if (fsec_g2)
3153 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3154 else
3155 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3156
3157 if (img_type == type)
3158 return true;
3159 }
3160 return false;
3161
3162 }
3163
3164 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3165 int header_size,
3166 const struct firmware *fw)
3167 {
3168 struct flash_section_info *fsec = NULL;
3169 const u8 *p = fw->data;
3170
3171 p += header_size;
3172 while (p < (fw->data + fw->size)) {
3173 fsec = (struct flash_section_info *)p;
3174 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3175 return fsec;
3176 p += 32;
3177 }
3178 return NULL;
3179 }
3180
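/* Write one firmware component to flash in 32KB chunks: intermediate
 * chunks use the SAVE operation and the final chunk uses the FLASH
 * operation that commits the image.
 */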
3181 static int be_flash(struct be_adapter *adapter, const u8 *img,
3182 struct be_dma_mem *flash_cmd, int optype, int img_size)
3183 {
3184 u32 total_bytes = 0, flash_op, num_bytes = 0;
3185 int status = 0;
3186 struct be_cmd_write_flashrom *req = flash_cmd->va;
3187
3188 total_bytes = img_size;
3189 while (total_bytes) {
3190 num_bytes = min_t(u32, 32*1024, total_bytes);
3191
3192 total_bytes -= num_bytes;
3193
3194 if (!total_bytes) {
3195 if (optype == OPTYPE_PHY_FW)
3196 flash_op = FLASHROM_OPER_PHY_FLASH;
3197 else
3198 flash_op = FLASHROM_OPER_FLASH;
3199 } else {
3200 if (optype == OPTYPE_PHY_FW)
3201 flash_op = FLASHROM_OPER_PHY_SAVE;
3202 else
3203 flash_op = FLASHROM_OPER_SAVE;
3204 }
3205
3206 memcpy(req->data_buf, img, num_bytes);
3207 img += num_bytes;
3208 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3209 flash_op, num_bytes);
3210 if (status) {
3211 if (status == ILLEGAL_IOCTL_REQ &&
3212 optype == OPTYPE_PHY_FW)
3213 break;
3214 dev_err(&adapter->pdev->dev,
3215 "cmd to write to flash rom failed.\n");
3216 return status;
3217 }
3218 }
3219 return 0;
3220 }
3221
3222 /* For BE2 and BE3 */
3223 static int be_flash_BEx(struct be_adapter *adapter,
3224 const struct firmware *fw,
3225 struct be_dma_mem *flash_cmd,
3226 int num_of_images)
3227
3228 {
3229 int status = 0, i, filehdr_size = 0;
3230 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3231 const u8 *p = fw->data;
3232 const struct flash_comp *pflashcomp;
3233 int num_comp, redboot;
3234 struct flash_section_info *fsec = NULL;
3235
3236 struct flash_comp gen3_flash_types[] = {
3237 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3238 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3239 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3240 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3241 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3242 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3243 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3244 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3245 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3246 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3247 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3248 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3249 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3250 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3251 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3252 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3253 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3254 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3255 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3256 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3257 };
3258
3259 struct flash_comp gen2_flash_types[] = {
3260 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3261 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3262 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3263 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3264 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3265 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3266 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3267 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3268 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3269 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3270 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3271 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3272 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3273 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3274 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3275 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3276 };
3277
3278 if (BE3_chip(adapter)) {
3279 pflashcomp = gen3_flash_types;
3280 filehdr_size = sizeof(struct flash_file_hdr_g3);
3281 num_comp = ARRAY_SIZE(gen3_flash_types);
3282 } else {
3283 pflashcomp = gen2_flash_types;
3284 filehdr_size = sizeof(struct flash_file_hdr_g2);
3285 num_comp = ARRAY_SIZE(gen2_flash_types);
3286 }
3287
3288 /* Get flash section info*/
3289 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3290 if (!fsec) {
3291 dev_err(&adapter->pdev->dev,
3292		"Invalid cookie. UFI corrupted?\n");
3293 return -1;
3294 }
3295 for (i = 0; i < num_comp; i++) {
3296 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3297 continue;
3298
3299 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3300 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3301 continue;
3302
3303 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3304 !phy_flashing_required(adapter))
3305 continue;
3306
3307 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3308 redboot = be_flash_redboot(adapter, fw->data,
3309 pflashcomp[i].offset, pflashcomp[i].size,
3310 filehdr_size + img_hdrs_size);
3311 if (!redboot)
3312 continue;
3313 }
3314
3315 p = fw->data;
3316 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3317 if (p + pflashcomp[i].size > fw->data + fw->size)
3318 return -1;
3319
3320 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3321 pflashcomp[i].size);
3322 if (status) {
3323 dev_err(&adapter->pdev->dev,
3324 "Flashing section type %d failed.\n",
3325 pflashcomp[i].img_type);
3326 return status;
3327 }
3328 }
3329 return 0;
3330 }
3331
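/* Skyhawk (UFI type 4) flashing: walk the flash section table, map each
 * image type to a flash op-type, flash RedBoot only when its CRC differs
 * from what is already on flash, and write every image found in the UFI.
 */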
3332 static int be_flash_skyhawk(struct be_adapter *adapter,
3333 const struct firmware *fw,
3334 struct be_dma_mem *flash_cmd, int num_of_images)
3335 {
3336 int status = 0, i, filehdr_size = 0;
3337 int img_offset, img_size, img_optype, redboot;
3338 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3339 const u8 *p = fw->data;
3340 struct flash_section_info *fsec = NULL;
3341
3342 filehdr_size = sizeof(struct flash_file_hdr_g3);
3343 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3344 if (!fsec) {
3345 dev_err(&adapter->pdev->dev,
3346		"Invalid cookie. UFI corrupted?\n");
3347 return -1;
3348 }
3349
3350 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3351 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3352 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3353
3354 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3355 case IMAGE_FIRMWARE_iSCSI:
3356 img_optype = OPTYPE_ISCSI_ACTIVE;
3357 break;
3358 case IMAGE_BOOT_CODE:
3359 img_optype = OPTYPE_REDBOOT;
3360 break;
3361 case IMAGE_OPTION_ROM_ISCSI:
3362 img_optype = OPTYPE_BIOS;
3363 break;
3364 case IMAGE_OPTION_ROM_PXE:
3365 img_optype = OPTYPE_PXE_BIOS;
3366 break;
3367 case IMAGE_OPTION_ROM_FCoE:
3368 img_optype = OPTYPE_FCOE_BIOS;
3369 break;
3370 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3371 img_optype = OPTYPE_ISCSI_BACKUP;
3372 break;
3373 case IMAGE_NCSI:
3374 img_optype = OPTYPE_NCSI_FW;
3375 break;
3376 default:
3377 continue;
3378 }
3379
3380 if (img_optype == OPTYPE_REDBOOT) {
3381 redboot = be_flash_redboot(adapter, fw->data,
3382 img_offset, img_size,
3383 filehdr_size + img_hdrs_size);
3384 if (!redboot)
3385 continue;
3386 }
3387
3388 p = fw->data;
3389 p += filehdr_size + img_offset + img_hdrs_size;
3390 if (p + img_size > fw->data + fw->size)
3391 return -1;
3392
3393 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3394 if (status) {
3395 dev_err(&adapter->pdev->dev,
3396 "Flashing section type %d failed.\n",
3397 fsec->fsec_entry[i].type);
3398 return status;
3399 }
3400 }
3401 return 0;
3402 }
3403
3404 static int lancer_wait_idle(struct be_adapter *adapter)
3405 {
3406 #define SLIPORT_IDLE_TIMEOUT 30
3407 u32 reg_val;
3408 int status = 0, i;
3409
3410 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3411 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3412 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3413 break;
3414
3415 ssleep(1);
3416 }
3417
3418 if (i == SLIPORT_IDLE_TIMEOUT)
3419 status = -1;
3420
3421 return status;
3422 }
3423
3424 static int lancer_fw_reset(struct be_adapter *adapter)
3425 {
3426 int status = 0;
3427
3428 status = lancer_wait_idle(adapter);
3429 if (status)
3430 return status;
3431
3432 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3433 PHYSDEV_CONTROL_OFFSET);
3434
3435 return status;
3436 }
3437
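/* Lancer firmware download: stream the image in 32KB chunks via the
 * WRITE_OBJECT command to LANCER_FW_DOWNLOAD_LOCATION, issue a zero-length
 * write to commit it, and reset the function if the FW requests it.
 */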
3438 static int lancer_fw_download(struct be_adapter *adapter,
3439 const struct firmware *fw)
3440 {
3441 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3442 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3443 struct be_dma_mem flash_cmd;
3444 const u8 *data_ptr = NULL;
3445 u8 *dest_image_ptr = NULL;
3446 size_t image_size = 0;
3447 u32 chunk_size = 0;
3448 u32 data_written = 0;
3449 u32 offset = 0;
3450 int status = 0;
3451 u8 add_status = 0;
3452 u8 change_status;
3453
3454 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3455 dev_err(&adapter->pdev->dev,
3456			"FW image not properly aligned. "
3457			"Length must be 4-byte aligned.\n");
3458 status = -EINVAL;
3459 goto lancer_fw_exit;
3460 }
3461
3462 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3463 + LANCER_FW_DOWNLOAD_CHUNK;
3464 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3465 &flash_cmd.dma, GFP_KERNEL);
3466 if (!flash_cmd.va) {
3467 status = -ENOMEM;
3468 goto lancer_fw_exit;
3469 }
3470
3471 dest_image_ptr = flash_cmd.va +
3472 sizeof(struct lancer_cmd_req_write_object);
3473 image_size = fw->size;
3474 data_ptr = fw->data;
3475
3476 while (image_size) {
3477 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3478
3479 /* Copy the image chunk content. */
3480 memcpy(dest_image_ptr, data_ptr, chunk_size);
3481
3482 status = lancer_cmd_write_object(adapter, &flash_cmd,
3483 chunk_size, offset,
3484 LANCER_FW_DOWNLOAD_LOCATION,
3485 &data_written, &change_status,
3486 &add_status);
3487 if (status)
3488 break;
3489
3490 offset += data_written;
3491 data_ptr += data_written;
3492 image_size -= data_written;
3493 }
3494
3495 if (!status) {
3496 /* Commit the FW written */
3497 status = lancer_cmd_write_object(adapter, &flash_cmd,
3498 0, offset,
3499 LANCER_FW_DOWNLOAD_LOCATION,
3500 &data_written, &change_status,
3501 &add_status);
3502 }
3503
3504 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3505 flash_cmd.dma);
3506 if (status) {
3507 dev_err(&adapter->pdev->dev,
3508 "Firmware load error. "
3509 "Status code: 0x%x Additional Status: 0x%x\n",
3510 status, add_status);
3511 goto lancer_fw_exit;
3512 }
3513
3514 if (change_status == LANCER_FW_RESET_NEEDED) {
3515 status = lancer_fw_reset(adapter);
3516 if (status) {
3517 dev_err(&adapter->pdev->dev,
3518 "Adapter busy for FW reset.\n"
3519 "New FW will not be active.\n");
3520 goto lancer_fw_exit;
3521 }
3522 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3523 dev_err(&adapter->pdev->dev,
3524 "System reboot required for new FW"
3525 " to be active\n");
3526 }
3527
3528 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3529 lancer_fw_exit:
3530 return status;
3531 }
3532
3533 #define UFI_TYPE2 2
3534 #define UFI_TYPE3 3
3535 #define UFI_TYPE4 4
3536 static int be_get_ufi_type(struct be_adapter *adapter,
3537 struct flash_file_hdr_g2 *fhdr)
3538 {
3539 if (fhdr == NULL)
3540 goto be_get_ufi_exit;
3541
3542 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3543 return UFI_TYPE4;
3544 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3545 return UFI_TYPE3;
3546 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3547 return UFI_TYPE2;
3548
3549 be_get_ufi_exit:
3550 dev_err(&adapter->pdev->dev,
3551 "UFI and Interface are not compatible for flashing\n");
3552 return -1;
3553 }
3554
3555 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3556 {
3557 struct flash_file_hdr_g2 *fhdr;
3558 struct flash_file_hdr_g3 *fhdr3;
3559 struct image_hdr *img_hdr_ptr = NULL;
3560 struct be_dma_mem flash_cmd;
3561 const u8 *p;
3562 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3563
3564 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3565 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3566 &flash_cmd.dma, GFP_KERNEL);
3567 if (!flash_cmd.va) {
3568 status = -ENOMEM;
3569 goto be_fw_exit;
3570 }
3571
3572 p = fw->data;
3573 fhdr = (struct flash_file_hdr_g2 *)p;
3574
3575 ufi_type = be_get_ufi_type(adapter, fhdr);
3576
3577 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3578 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3579 for (i = 0; i < num_imgs; i++) {
3580 img_hdr_ptr = (struct image_hdr *)(fw->data +
3581 (sizeof(struct flash_file_hdr_g3) +
3582 i * sizeof(struct image_hdr)));
3583 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3584 if (ufi_type == UFI_TYPE4)
3585 status = be_flash_skyhawk(adapter, fw,
3586 &flash_cmd, num_imgs);
3587 else if (ufi_type == UFI_TYPE3)
3588 status = be_flash_BEx(adapter, fw, &flash_cmd,
3589 num_imgs);
3590 }
3591 }
3592
3593 if (ufi_type == UFI_TYPE2)
3594 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3595 else if (ufi_type == -1)
3596 status = -1;
3597
3598 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3599 flash_cmd.dma);
3600 if (status) {
3601 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3602 goto be_fw_exit;
3603 }
3604
3605 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3606
3607 be_fw_exit:
3608 return status;
3609 }
3610
3611 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3612 {
3613 const struct firmware *fw;
3614 int status;
3615
3616 if (!netif_running(adapter->netdev)) {
3617 dev_err(&adapter->pdev->dev,
3618 "Firmware load not allowed (interface is down)\n");
3619 return -1;
3620 }
3621
3622 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3623 if (status)
3624 goto fw_exit;
3625
3626 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3627
3628 if (lancer_chip(adapter))
3629 status = lancer_fw_download(adapter, fw);
3630 else
3631 status = be_fw_download(adapter, fw);
3632
3633 fw_exit:
3634 release_firmware(fw);
3635 return status;
3636 }
3637
3638 static const struct net_device_ops be_netdev_ops = {
3639 .ndo_open = be_open,
3640 .ndo_stop = be_close,
3641 .ndo_start_xmit = be_xmit,
3642 .ndo_set_rx_mode = be_set_rx_mode,
3643 .ndo_set_mac_address = be_mac_addr_set,
3644 .ndo_change_mtu = be_change_mtu,
3645 .ndo_get_stats64 = be_get_stats64,
3646 .ndo_validate_addr = eth_validate_addr,
3647 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3648 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3649 .ndo_set_vf_mac = be_set_vf_mac,
3650 .ndo_set_vf_vlan = be_set_vf_vlan,
3651 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3652 .ndo_get_vf_config = be_get_vf_config,
3653 #ifdef CONFIG_NET_POLL_CONTROLLER
3654 .ndo_poll_controller = be_netpoll,
3655 #endif
3656 };
3657
3658 static void be_netdev_init(struct net_device *netdev)
3659 {
3660 struct be_adapter *adapter = netdev_priv(netdev);
3661 struct be_eq_obj *eqo;
3662 int i;
3663
3664 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3665 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3666 NETIF_F_HW_VLAN_CTAG_TX;
3667 if (be_multi_rxq(adapter))
3668 netdev->hw_features |= NETIF_F_RXHASH;
3669
3670 netdev->features |= netdev->hw_features |
3671 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3672
3673 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3675
3676 netdev->priv_flags |= IFF_UNICAST_FLT;
3677
3678 netdev->flags |= IFF_MULTICAST;
3679
3680 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3681
3682 netdev->netdev_ops = &be_netdev_ops;
3683
3684 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3685
3686 for_all_evt_queues(adapter, eqo, i)
3687 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3688 }
3689
3690 static void be_unmap_pci_bars(struct be_adapter *adapter)
3691 {
3692 if (adapter->csr)
3693 pci_iounmap(adapter->pdev, adapter->csr);
3694 if (adapter->db)
3695 pci_iounmap(adapter->pdev, adapter->db);
3696 }
3697
3698 static int db_bar(struct be_adapter *adapter)
3699 {
3700 if (lancer_chip(adapter) || !be_physfn(adapter))
3701 return 0;
3702 else
3703 return 4;
3704 }
3705
3706 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3707 {
3708 if (skyhawk_chip(adapter)) {
3709 adapter->roce_db.size = 4096;
3710 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3711 db_bar(adapter));
3712 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3713 db_bar(adapter));
3714 }
3715 return 0;
3716 }
3717
3718 static int be_map_pci_bars(struct be_adapter *adapter)
3719 {
3720 u8 __iomem *addr;
3721 u32 sli_intf;
3722
3723 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3724 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3725 SLI_INTF_IF_TYPE_SHIFT;
3726
3727 if (BEx_chip(adapter) && be_physfn(adapter)) {
3728 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3729 if (adapter->csr == NULL)
3730 return -ENOMEM;
3731 }
3732
3733 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3734 if (addr == NULL)
3735 goto pci_map_err;
3736 adapter->db = addr;
3737
3738 be_roce_map_pci_bars(adapter);
3739 return 0;
3740
3741 pci_map_err:
3742 be_unmap_pci_bars(adapter);
3743 return -ENOMEM;
3744 }
3745
3746 static void be_ctrl_cleanup(struct be_adapter *adapter)
3747 {
3748 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3749
3750 be_unmap_pci_bars(adapter);
3751
3752 if (mem->va)
3753 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3754 mem->dma);
3755
3756 mem = &adapter->rx_filter;
3757 if (mem->va)
3758 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3759 mem->dma);
3760 }
3761
3762 static int be_ctrl_init(struct be_adapter *adapter)
3763 {
3764 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3765 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3766 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3767 u32 sli_intf;
3768 int status;
3769
3770 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3771 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3772 SLI_INTF_FAMILY_SHIFT;
3773 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3774
3775 status = be_map_pci_bars(adapter);
3776 if (status)
3777 goto done;
3778
3779 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3780 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3781 mbox_mem_alloc->size,
3782 &mbox_mem_alloc->dma,
3783 GFP_KERNEL);
3784 if (!mbox_mem_alloc->va) {
3785 status = -ENOMEM;
3786 goto unmap_pci_bars;
3787 }
3788 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3789 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3790 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3791 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3792
3793 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3794 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3795 &rx_filter->dma,
3796 GFP_KERNEL | __GFP_ZERO);
3797 if (rx_filter->va == NULL) {
3798 status = -ENOMEM;
3799 goto free_mbox;
3800 }
3801
3802 mutex_init(&adapter->mbox_lock);
3803 spin_lock_init(&adapter->mcc_lock);
3804 spin_lock_init(&adapter->mcc_cq_lock);
3805
3806 init_completion(&adapter->flash_compl);
3807 pci_save_state(adapter->pdev);
3808 return 0;
3809
3810 free_mbox:
3811 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3812 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3813
3814 unmap_pci_bars:
3815 be_unmap_pci_bars(adapter);
3816
3817 done:
3818 return status;
3819 }
3820
3821 static void be_stats_cleanup(struct be_adapter *adapter)
3822 {
3823 struct be_dma_mem *cmd = &adapter->stats_cmd;
3824
3825 if (cmd->va)
3826 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3827 cmd->va, cmd->dma);
3828 }
3829
3830 static int be_stats_init(struct be_adapter *adapter)
3831 {
3832 struct be_dma_mem *cmd = &adapter->stats_cmd;
3833
3834 if (lancer_chip(adapter))
3835 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3836 else if (BE2_chip(adapter))
3837 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3838 else
3839 /* BE3 and Skyhawk */
3840 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3841
3842 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3843 GFP_KERNEL | __GFP_ZERO);
3844 if (cmd->va == NULL)
3845 		return -ENOMEM;
3846 return 0;
3847 }
3848
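/* Tear down in roughly the reverse order of be_probe(): detach the RoCE
 * device, mask interrupts, stop the recovery worker, unregister the netdev
 * and free the queues, tell the FW we are done with commands, then release
 * the DMA buffers and PCI resources.
 */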
3849 static void be_remove(struct pci_dev *pdev)
3850 {
3851 struct be_adapter *adapter = pci_get_drvdata(pdev);
3852
3853 if (!adapter)
3854 return;
3855
3856 be_roce_dev_remove(adapter);
3857 be_intr_set(adapter, false);
3858
3859 cancel_delayed_work_sync(&adapter->func_recovery_work);
3860
3861 unregister_netdev(adapter->netdev);
3862
3863 be_clear(adapter);
3864
3865 /* tell fw we're done with firing cmds */
3866 be_cmd_fw_clean(adapter);
3867
3868 be_stats_cleanup(adapter);
3869
3870 be_ctrl_cleanup(adapter);
3871
3872 pci_disable_pcie_error_reporting(pdev);
3873
3874 pci_set_drvdata(pdev, NULL);
3875 pci_release_regions(pdev);
3876 pci_disable_device(pdev);
3877
3878 free_netdev(adapter->netdev);
3879 }
3880
3881 bool be_is_wol_supported(struct be_adapter *adapter)
3882 {
3883 	return (adapter->wol_cap & BE_WOL_CAP) &&
3884 		!be_is_wol_excluded(adapter);
3885 }
3886
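/* Read the current FW log (UART trace) level from the extended FAT
 * capabilities. Not supported on Lancer, where 0 is returned.
 */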
3887 u32 be_get_fw_log_level(struct be_adapter *adapter)
3888 {
3889 struct be_dma_mem extfat_cmd;
3890 struct be_fat_conf_params *cfgs;
3891 int status;
3892 u32 level = 0;
3893 int j;
3894
3895 if (lancer_chip(adapter))
3896 return 0;
3897
3898 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3899 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3900 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3901 &extfat_cmd.dma);
3902
3903 if (!extfat_cmd.va) {
3904 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3905 __func__);
3906 goto err;
3907 }
3908
3909 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3910 if (!status) {
3911 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3912 sizeof(struct be_cmd_resp_hdr));
3913 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3914 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3915 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3916 }
3917 }
3918 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3919 extfat_cmd.dma);
3920 err:
3921 return level;
3922 }
3923
3924 static int be_get_initial_config(struct be_adapter *adapter)
3925 {
3926 int status;
3927 u32 level;
3928
3929 status = be_cmd_get_cntl_attributes(adapter);
3930 if (status)
3931 return status;
3932
3933 status = be_cmd_get_acpi_wol_cap(adapter);
3934 if (status) {
3935 		/* in case of a failure to get WOL capabilities,
3936 		 * check the exclusion list to determine WOL capability */
3937 if (!be_is_wol_excluded(adapter))
3938 adapter->wol_cap |= BE_WOL_CAP;
3939 }
3940
3941 if (be_is_wol_supported(adapter))
3942 adapter->wol = true;
3943
3944 /* Must be a power of 2 or else MODULO will BUG_ON */
3945 adapter->be_get_temp_freq = 64;
3946
3947 level = be_get_fw_log_level(adapter);
3948 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3949
3950 return 0;
3951 }
3952
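/* Lancer SLIPORT recovery: wait for the FW to report a ready state, tear
 * down the interface, clear the error flags and then re-run be_setup()
 * and be_open() to bring the function back up.
 */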
3953 static int lancer_recover_func(struct be_adapter *adapter)
3954 {
3955 int status;
3956
3957 status = lancer_test_and_set_rdy_state(adapter);
3958 if (status)
3959 goto err;
3960
3961 if (netif_running(adapter->netdev))
3962 be_close(adapter->netdev);
3963
3964 be_clear(adapter);
3965
3966 adapter->hw_error = false;
3967 adapter->fw_timeout = false;
3968
3969 status = be_setup(adapter);
3970 if (status)
3971 goto err;
3972
3973 if (netif_running(adapter->netdev)) {
3974 status = be_open(adapter->netdev);
3975 if (status)
3976 goto err;
3977 }
3978
3979 	dev_info(&adapter->pdev->dev,
3980 		 "Adapter SLIPORT recovery succeeded\n");
3981 return 0;
3982 err:
3983 if (adapter->eeh_error)
3984 dev_err(&adapter->pdev->dev,
3985 "Adapter SLIPORT recovery failed\n");
3986
3987 return status;
3988 }
3989
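/* Runs every second to check for HW errors. On Lancer chips a detected
 * error triggers SLIPORT recovery with the netdev detached; the netdev
 * is re-attached only if recovery succeeds.
 */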
3990 static void be_func_recovery_task(struct work_struct *work)
3991 {
3992 struct be_adapter *adapter =
3993 container_of(work, struct be_adapter, func_recovery_work.work);
3994 int status;
3995
3996 be_detect_error(adapter);
3997
3998 if (adapter->hw_error && lancer_chip(adapter)) {
3999
4000 if (adapter->eeh_error)
4001 goto out;
4002
4003 rtnl_lock();
4004 netif_device_detach(adapter->netdev);
4005 rtnl_unlock();
4006
4007 status = lancer_recover_func(adapter);
4008
4009 if (!status)
4010 netif_device_attach(adapter->netdev);
4011 }
4012
4013 out:
4014 schedule_delayed_work(&adapter->func_recovery_work,
4015 msecs_to_jiffies(1000));
4016 }
4017
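/* Periodic (1 second) housekeeping: reap MCC completions while the
 * interface is down, refresh HW stats and die temperature, replenish RX
 * rings that ran out of buffers and adapt the EQ interrupt delay.
 */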
4018 static void be_worker(struct work_struct *work)
4019 {
4020 struct be_adapter *adapter =
4021 container_of(work, struct be_adapter, work.work);
4022 struct be_rx_obj *rxo;
4023 struct be_eq_obj *eqo;
4024 int i;
4025
4026 /* when interrupts are not yet enabled, just reap any pending
4027 * mcc completions */
4028 if (!netif_running(adapter->netdev)) {
4029 local_bh_disable();
4030 be_process_mcc(adapter);
4031 local_bh_enable();
4032 goto reschedule;
4033 }
4034
4035 if (!adapter->stats_cmd_sent) {
4036 if (lancer_chip(adapter))
4037 lancer_cmd_get_pport_stats(adapter,
4038 &adapter->stats_cmd);
4039 else
4040 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4041 }
4042
4043 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4044 be_cmd_get_die_temperature(adapter);
4045
4046 for_all_rx_queues(adapter, rxo, i) {
4047 if (rxo->rx_post_starved) {
4048 rxo->rx_post_starved = false;
4049 be_post_rx_frags(rxo, GFP_KERNEL);
4050 }
4051 }
4052
4053 for_all_evt_queues(adapter, eqo, i)
4054 be_eqd_update(adapter, eqo);
4055
4056 reschedule:
4057 adapter->work_counter++;
4058 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4059 }
4060
4061 static bool be_reset_required(struct be_adapter *adapter)
4062 {
4063 	return be_find_vfs(adapter, ENABLED) <= 0;
4064 }
4065
4066 static char *mc_name(struct be_adapter *adapter)
4067 {
4068 if (adapter->function_mode & FLEX10_MODE)
4069 return "FLEX10";
4070 else if (adapter->function_mode & VNIC_MODE)
4071 return "vNIC";
4072 else if (adapter->function_mode & UMC_ENABLED)
4073 return "UMC";
4074 else
4075 return "";
4076 }
4077
4078 static inline char *func_name(struct be_adapter *adapter)
4079 {
4080 return be_physfn(adapter) ? "PF" : "VF";
4081 }
4082
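/* PCI probe: enable the device, set the DMA mask, map BARs and allocate
 * control structures, sync with the FW ready state, reset the function if
 * required, create queues via be_setup() and finally register the netdev
 * and the RoCE interface.
 */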
4083 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4084 {
4085 int status = 0;
4086 struct be_adapter *adapter;
4087 struct net_device *netdev;
4088 char port_name;
4089
4090 status = pci_enable_device(pdev);
4091 if (status)
4092 goto do_none;
4093
4094 status = pci_request_regions(pdev, DRV_NAME);
4095 if (status)
4096 goto disable_dev;
4097 pci_set_master(pdev);
4098
4099 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4100 if (netdev == NULL) {
4101 status = -ENOMEM;
4102 goto rel_reg;
4103 }
4104 adapter = netdev_priv(netdev);
4105 adapter->pdev = pdev;
4106 pci_set_drvdata(pdev, adapter);
4107 adapter->netdev = netdev;
4108 SET_NETDEV_DEV(netdev, &pdev->dev);
4109
4110 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4111 if (!status) {
4112 netdev->features |= NETIF_F_HIGHDMA;
4113 } else {
4114 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4115 if (status) {
4116 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4117 goto free_netdev;
4118 }
4119 }
4120
4121 status = pci_enable_pcie_error_reporting(pdev);
4122 if (status)
4123 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4124
4125 status = be_ctrl_init(adapter);
4126 if (status)
4127 goto free_netdev;
4128
4129 /* sync up with fw's ready state */
4130 if (be_physfn(adapter)) {
4131 status = be_fw_wait_ready(adapter);
4132 if (status)
4133 goto ctrl_clean;
4134 }
4135
4136 /* tell fw we're ready to fire cmds */
4137 status = be_cmd_fw_init(adapter);
4138 if (status)
4139 goto ctrl_clean;
4140
4141 if (be_reset_required(adapter)) {
4142 status = be_cmd_reset_function(adapter);
4143 if (status)
4144 goto ctrl_clean;
4145 }
4146
4147 /* Wait for interrupts to quiesce after an FLR */
4148 msleep(100);
4149
4150 /* Allow interrupts for other ULPs running on NIC function */
4151 be_intr_set(adapter, true);
4152
4153 status = be_stats_init(adapter);
4154 if (status)
4155 goto ctrl_clean;
4156
4157 status = be_get_initial_config(adapter);
4158 if (status)
4159 goto stats_clean;
4160
4161 INIT_DELAYED_WORK(&adapter->work, be_worker);
4162 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4163 adapter->rx_fc = adapter->tx_fc = true;
4164
4165 status = be_setup(adapter);
4166 if (status)
4167 goto stats_clean;
4168
4169 be_netdev_init(netdev);
4170 status = register_netdev(netdev);
4171 if (status != 0)
4172 goto unsetup;
4173
4174 be_roce_dev_add(adapter);
4175
4176 schedule_delayed_work(&adapter->func_recovery_work,
4177 msecs_to_jiffies(1000));
4178
4179 be_cmd_query_port_name(adapter, &port_name);
4180
4181 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4182 func_name(adapter), mc_name(adapter), port_name);
4183
4184 return 0;
4185
4186 unsetup:
4187 be_clear(adapter);
4188 stats_clean:
4189 be_stats_cleanup(adapter);
4190 ctrl_clean:
4191 be_ctrl_cleanup(adapter);
4192 free_netdev:
4193 free_netdev(netdev);
4194 pci_set_drvdata(pdev, NULL);
4195 rel_reg:
4196 pci_release_regions(pdev);
4197 disable_dev:
4198 pci_disable_device(pdev);
4199 do_none:
4200 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4201 return status;
4202 }
4203
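/* Legacy PM suspend: arm WoL if enabled, stop the recovery worker, close
 * the interface and free the queues, then put the device into the
 * requested low-power state.
 */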
4204 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4205 {
4206 struct be_adapter *adapter = pci_get_drvdata(pdev);
4207 struct net_device *netdev = adapter->netdev;
4208
4209 if (adapter->wol)
4210 be_setup_wol(adapter, true);
4211
4212 cancel_delayed_work_sync(&adapter->func_recovery_work);
4213
4214 netif_device_detach(netdev);
4215 if (netif_running(netdev)) {
4216 rtnl_lock();
4217 be_close(netdev);
4218 rtnl_unlock();
4219 }
4220 be_clear(adapter);
4221
4222 pci_save_state(pdev);
4223 pci_disable_device(pdev);
4224 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4225 return 0;
4226 }
4227
4228 static int be_resume(struct pci_dev *pdev)
4229 {
4230 int status = 0;
4231 struct be_adapter *adapter = pci_get_drvdata(pdev);
4232 struct net_device *netdev = adapter->netdev;
4233
4234 netif_device_detach(netdev);
4235
4236 status = pci_enable_device(pdev);
4237 if (status)
4238 return status;
4239
4240 pci_set_power_state(pdev, 0);
4241 pci_restore_state(pdev);
4242
4243 /* tell fw we're ready to fire cmds */
4244 status = be_cmd_fw_init(adapter);
4245 if (status)
4246 return status;
4247
4248 be_setup(adapter);
4249 if (netif_running(netdev)) {
4250 rtnl_lock();
4251 be_open(netdev);
4252 rtnl_unlock();
4253 }
4254
4255 schedule_delayed_work(&adapter->func_recovery_work,
4256 msecs_to_jiffies(1000));
4257 netif_device_attach(netdev);
4258
4259 if (adapter->wol)
4260 be_setup_wol(adapter, false);
4261
4262 return 0;
4263 }
4264
4265 /*
4266 * An FLR will stop BE from DMAing any data.
4267 */
4268 static void be_shutdown(struct pci_dev *pdev)
4269 {
4270 struct be_adapter *adapter = pci_get_drvdata(pdev);
4271
4272 if (!adapter)
4273 return;
4274
4275 cancel_delayed_work_sync(&adapter->work);
4276 cancel_delayed_work_sync(&adapter->func_recovery_work);
4277
4278 netif_device_detach(adapter->netdev);
4279
4280 be_cmd_reset_function(adapter);
4281
4282 pci_disable_device(pdev);
4283 }
4284
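/* EEH (PCI error) handling: on error detection the netdev is detached and
 * the function torn down; slot_reset re-enables the device and waits for
 * FW readiness; resume re-initializes the function and re-attaches the
 * netdev.
 */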
4285 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4286 pci_channel_state_t state)
4287 {
4288 struct be_adapter *adapter = pci_get_drvdata(pdev);
4289 struct net_device *netdev = adapter->netdev;
4290
4291 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4292
4293 adapter->eeh_error = true;
4294
4295 cancel_delayed_work_sync(&adapter->func_recovery_work);
4296
4297 rtnl_lock();
4298 netif_device_detach(netdev);
4299 rtnl_unlock();
4300
4301 if (netif_running(netdev)) {
4302 rtnl_lock();
4303 be_close(netdev);
4304 rtnl_unlock();
4305 }
4306 be_clear(adapter);
4307
4308 if (state == pci_channel_io_perm_failure)
4309 return PCI_ERS_RESULT_DISCONNECT;
4310
4311 pci_disable_device(pdev);
4312
4313 /* The error could cause the FW to trigger a flash debug dump.
4314 * Resetting the card while flash dump is in progress
4315 * can cause it not to recover; wait for it to finish.
4316 * Wait only for first function as it is needed only once per
4317 * adapter.
4318 */
4319 if (pdev->devfn == 0)
4320 ssleep(30);
4321
4322 return PCI_ERS_RESULT_NEED_RESET;
4323 }
4324
4325 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4326 {
4327 struct be_adapter *adapter = pci_get_drvdata(pdev);
4328 int status;
4329
4330 dev_info(&adapter->pdev->dev, "EEH reset\n");
4331 be_clear_all_error(adapter);
4332
4333 status = pci_enable_device(pdev);
4334 if (status)
4335 return PCI_ERS_RESULT_DISCONNECT;
4336
4337 pci_set_master(pdev);
4338 pci_set_power_state(pdev, 0);
4339 pci_restore_state(pdev);
4340
4341 /* Check if card is ok and fw is ready */
4342 dev_info(&adapter->pdev->dev,
4343 "Waiting for FW to be ready after EEH reset\n");
4344 status = be_fw_wait_ready(adapter);
4345 if (status)
4346 return PCI_ERS_RESULT_DISCONNECT;
4347
4348 pci_cleanup_aer_uncorrect_error_status(pdev);
4349 return PCI_ERS_RESULT_RECOVERED;
4350 }
4351
4352 static void be_eeh_resume(struct pci_dev *pdev)
4353 {
4354 int status = 0;
4355 struct be_adapter *adapter = pci_get_drvdata(pdev);
4356 struct net_device *netdev = adapter->netdev;
4357
4358 dev_info(&adapter->pdev->dev, "EEH resume\n");
4359
4360 pci_save_state(pdev);
4361
4362 /* tell fw we're ready to fire cmds */
4363 status = be_cmd_fw_init(adapter);
4364 if (status)
4365 goto err;
4366
4367 status = be_cmd_reset_function(adapter);
4368 if (status)
4369 goto err;
4370
4371 status = be_setup(adapter);
4372 if (status)
4373 goto err;
4374
4375 if (netif_running(netdev)) {
4376 status = be_open(netdev);
4377 if (status)
4378 goto err;
4379 }
4380
4381 schedule_delayed_work(&adapter->func_recovery_work,
4382 msecs_to_jiffies(1000));
4383 netif_device_attach(netdev);
4384 return;
4385 err:
4386 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4387 }
4388
4389 static const struct pci_error_handlers be_eeh_handlers = {
4390 .error_detected = be_eeh_err_detected,
4391 .slot_reset = be_eeh_reset,
4392 .resume = be_eeh_resume,
4393 };
4394
4395 static struct pci_driver be_driver = {
4396 .name = DRV_NAME,
4397 .id_table = be_dev_ids,
4398 .probe = be_probe,
4399 .remove = be_remove,
4400 .suspend = be_suspend,
4401 .resume = be_resume,
4402 .shutdown = be_shutdown,
4403 .err_handler = &be_eeh_handlers
4404 };
4405
4406 static int __init be_init_module(void)
4407 {
4408 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4409 rx_frag_size != 2048) {
4410 printk(KERN_WARNING DRV_NAME
4411 " : Module param rx_frag_size must be 2048/4096/8192."
4412 " Using 2048\n");
4413 rx_frag_size = 2048;
4414 }
4415
4416 return pci_register_driver(&be_driver);
4417 }
4418 module_init(be_init_module);
4419
4420 static void __exit be_exit_module(void)
4421 {
4422 pci_unregister_driver(&be_driver);
4423 }
4424 module_exit(be_exit_module);