drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
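/* Example (illustrative values only): the module parameters above can be set
 * at load time, e.g. to enable SR-IOV VFs and use a larger rx fragment size:
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */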
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
111 "NETC",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131 struct be_dma_mem *mem = &q->dma_mem;
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141 {
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va)
151 return -ENOMEM;
152 return 0;
153 }
154
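/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * MEMBAR interrupt-control register in PCI config space; does nothing if
 * the bit is already in the requested state.
 */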
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157 u32 reg, enabled;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
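/* Enable/disable interrupts via the be_cmd_intr_set() FW command, falling
 * back to the config-space register if the command fails. No-op on Lancer
 * and when an EEH error is pending.
 */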
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176 int status = 0;
177
178 	/* On Lancer, interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188 }
189
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196 wmb();
197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
202 {
203 u32 val = 0;
204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207 wmb();
208 iowrite32(val, adapter->db + txo->db_offset);
209 }
210
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212 bool arm, bool clear_int, u16 num_popped)
213 {
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219 if (adapter->eeh_error)
220 return;
221
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238 if (adapter->eeh_error)
239 return;
240
241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
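/* Change the interface MAC address: program the new address as a pmac entry,
 * remove the previously active entry and update netdev->dev_addr. For BE VFs
 * the MAC is already set by the PF, so only netdev->dev_addr is updated.
 */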
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
252 u8 current_mac[ETH_ALEN];
253 u32 pmac_id = adapter->pmac_id[0];
254 bool active_mac = true;
255
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
259 	/* For a BE VF, the MAC address has already been activated by the PF.
260 	 * Hence the only operation left is updating netdev->dev_addr.
261 	 * Update it only if the user passes the same MAC that was configured
262 	 * for the VF by the PF (hypervisor).
263 	 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
287 if (status)
288 goto err;
289
290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293 done:
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296 err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
298 return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334 struct be_port_rxf_stats_v0 *port_stats =
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
337
338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
366 drvs->jabber_events = rxf_stats->port1_jabber_events;
367 else
368 drvs->jabber_events = rxf_stats->port0_jabber_events;
369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383 struct be_port_rxf_stats_v1 *port_stats =
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
386
387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422 }
423
424 static void populate_lancer_stats(struct be_adapter *adapter)
425 {
426
427 struct be_drv_stats *drvs = &adapter->drv_stats;
428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456 drvs->jabber_events = pport_stats->rx_jabbers;
457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459 drvs->rx_drops_too_many_frags =
460 pport_stats->rx_drops_too_many_frags_lo;
461 }
462
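/* Fold a 16-bit HW counter reading into a 32-bit software accumulator,
 * adding 65536 whenever the counter is seen to have wrapped.
 */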
463 static void accumulate_16bit_val(u32 *acc, u16 val)
464 {
465 #define lo(x) (x & 0xFFFF)
466 #define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473 }
474
475 void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478 {
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 		/* The erx HW counter below can wrap around after 65535;
483 		 * the driver accumulates it into a 32-bit value.
484 		 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487 }
488
489 void be_parse_stats(struct be_adapter *adapter)
490 {
491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
494 u32 erx_stat;
495
496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
498 } else {
499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
504
505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
509 }
510 }
511 }
512
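/* Aggregate the per-queue rx/tx counters (read consistently under the
 * u64_stats sync points) and the driver error stats into rtnl_link_stats64.
 */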
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
515 {
516 struct be_adapter *adapter = netdev_priv(netdev);
517 struct be_drv_stats *drvs = &adapter->drv_stats;
518 struct be_rx_obj *rxo;
519 struct be_tx_obj *txo;
520 u64 pkts, bytes;
521 unsigned int start;
522 int i;
523
524 for_all_rx_queues(adapter, rxo, i) {
525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
536 }
537
538 for_all_tx_queues(adapter, txo, i) {
539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
547 }
548
549 /* bad pkts received */
550 stats->rx_errors = drvs->rx_crc_errors +
551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
559 drvs->rx_dropped_runt;
560
561 /* detailed rx errors */
562 stats->rx_length_errors = drvs->rx_in_range_errors +
563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
565
566 stats->rx_crc_errors = drvs->rx_crc_errors;
567
568 /* frame alignment errors */
569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
570
571 /* receiver fifo overrun */
572 	/* drops_no_pbuf is not per i/f, it's per BE card */
573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
576 return stats;
577 }
578
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
580 {
581 struct net_device *netdev = adapter->netdev;
582
583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584 netif_carrier_off(netdev);
585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
586 }
587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
592 }
593
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
596 {
597 struct be_tx_stats *stats = tx_stats(txo);
598
599 u64_stats_update_begin(&stats->sync);
600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
604 if (stopped)
605 stats->tx_stops++;
606 u64_stats_update_end(&stats->sync);
607 }
608
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
612 {
613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
617 /* to account for hdr wrb */
618 cnt++;
619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
625 }
626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628 }
629
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631 {
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
635 wrb->rsvd0 = 0;
636 }
637
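/* Return the VLAN tag to use for a tx skb; if the skb's priority is not in
 * the allowed priority bitmap, substitute the FW-recommended priority.
 */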
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640 {
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652 }
653
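/* Fill the header WRB that precedes the fragment WRBs: LSO/checksum-offload
 * flags, optional VLAN tag, total WRB count and payload length.
 */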
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
656 {
657 u16 vlan_tag;
658
659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
663 if (skb_is_gso(skb)) {
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
676 if (vlan_tx_tag_present(skb)) {
677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
680 }
681
682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687 }
688
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690 bool unmap_single)
691 {
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697 if (wrb->frag_len) {
698 if (unmap_single)
699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
701 else
702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703 }
704 }
705
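/* DMA-map the skb head and its frags and post one WRB per mapping, plus the
 * header WRB and an optional dummy WRB. Returns the number of bytes posted,
 * or 0 after unwinding the mappings on a DMA mapping error.
 */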
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
709 {
710 dma_addr_t busaddr;
711 int i, copied = 0;
712 struct device *dev = &adapter->pdev->dev;
713 struct sk_buff *first_skb = skb;
714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
716 bool map_single = false;
717 u16 map_head;
718
719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
721 map_head = txq->head;
722
723 if (skb->len > skb->data_len) {
724 int len = skb_headlen(skb);
725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
727 goto dma_err;
728 map_single = true;
729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
735
736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737 const struct skb_frag_struct *frag =
738 &skb_shinfo(skb)->frags[i];
739 busaddr = skb_frag_dma_map(dev, frag, 0,
740 skb_frag_size(frag), DMA_TO_DEVICE);
741 if (dma_mapping_error(dev, busaddr))
742 goto dma_err;
743 wrb = queue_head_node(txq);
744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
747 copied += skb_frag_size(frag);
748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
761 dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
765 unmap_tx_frag(dev, wrb, map_single);
766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
771 }
772
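/* Insert the VLAN tag (and the outer QnQ tag, if any) into the packet data
 * itself instead of relying on HW tag insertion; used to work around HW
 * issues with certain packets.
 */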
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
776 {
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
783 if (vlan_tx_tag_present(skb)) {
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
786 if (skb)
787 skb->vlan_tci = 0;
788 }
789
790 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
791 if (!vlan_tag)
792 vlan_tag = adapter->pvid;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
796
797 if (vlan_tag) {
798 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
799 if (unlikely(!skb))
800 return skb;
801
802 skb->vlan_tci = 0;
803 }
804
805 /* Insert the outer VLAN, if any */
806 if (adapter->qnq_vid) {
807 vlan_tag = adapter->qnq_vid;
808 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
809 if (unlikely(!skb))
810 return skb;
811 if (skip_hw_vlan)
812 *skip_hw_vlan = true;
813 }
814
815 return skb;
816 }
817
818 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
819 {
820 struct ethhdr *eh = (struct ethhdr *)skb->data;
821 u16 offset = ETH_HLEN;
822
823 if (eh->h_proto == htons(ETH_P_IPV6)) {
824 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
825
826 offset += sizeof(struct ipv6hdr);
827 if (ip6h->nexthdr != NEXTHDR_TCP &&
828 ip6h->nexthdr != NEXTHDR_UDP) {
829 struct ipv6_opt_hdr *ehdr =
830 (struct ipv6_opt_hdr *) (skb->data + offset);
831
832 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
833 if (ehdr->hdrlen == 0xff)
834 return true;
835 }
836 }
837 return false;
838 }
839
840 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
841 {
842 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
843 }
844
845 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
846 {
847 return BE3_chip(adapter) &&
848 be_ipv6_exthdr_check(skb);
849 }
850
851 static netdev_tx_t be_xmit(struct sk_buff *skb,
852 struct net_device *netdev)
853 {
854 struct be_adapter *adapter = netdev_priv(netdev);
855 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
856 struct be_queue_info *txq = &txo->q;
857 struct iphdr *ip = NULL;
858 u32 wrb_cnt = 0, copied = 0;
859 u32 start = txq->head, eth_hdr_len;
860 bool dummy_wrb, stopped = false;
861 bool skip_hw_vlan = false;
862 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
863
864 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
865 VLAN_ETH_HLEN : ETH_HLEN;
866
867 	/* For padded packets, BE HW modifies the tot_len field in the IP header
868 	 * incorrectly when a VLAN tag is inserted by HW.
869 	 */
870 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
871 ip = (struct iphdr *)ip_hdr(skb);
872 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
873 }
874
875 /* If vlan tag is already inlined in the packet, skip HW VLAN
876 * tagging in UMC mode
877 */
878 if ((adapter->function_mode & UMC_ENABLED) &&
879 veh->h_vlan_proto == htons(ETH_P_8021Q))
880 skip_hw_vlan = true;
881
882 	/* HW has a bug wherein it will compute the checksum for VLAN
883 	 * pkts even when checksum offload is disabled.
884 	 * Manually insert the VLAN tag in the pkt.
885 	 */
886 if (skb->ip_summed != CHECKSUM_PARTIAL &&
887 vlan_tx_tag_present(skb)) {
888 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
889 if (unlikely(!skb))
890 goto tx_drop;
891 }
892
893 	/* HW may lock up when VLAN HW tagging is requested on
894 * certain ipv6 packets. Drop such pkts if the HW workaround to
895 * skip HW tagging is not enabled by FW.
896 */
897 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
898 (adapter->pvid || adapter->qnq_vid) &&
899 !qnq_async_evt_rcvd(adapter)))
900 goto tx_drop;
901
902 	/* Manual VLAN tag insertion to prevent an ASIC lockup when the
903 	 * ASIC inserts a VLAN tag into certain ipv6 packets. Insert VLAN
904 	 * tags in the driver, and set the event, completion and vlan bits
905 	 * accordingly in the Tx WRB.
906 	 */
908 if (be_ipv6_tx_stall_chk(adapter, skb) &&
909 be_vlan_tag_tx_chk(adapter, skb)) {
910 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
911 if (unlikely(!skb))
912 goto tx_drop;
913 }
914
915 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
916
917 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
918 skip_hw_vlan);
919 if (copied) {
920 int gso_segs = skb_shinfo(skb)->gso_segs;
921
922 /* record the sent skb in the sent_skb table */
923 BUG_ON(txo->sent_skb_list[start]);
924 txo->sent_skb_list[start] = skb;
925
926 		/* Ensure txq has space for the next skb; else stop the queue
927 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
928 		 * tx compls of the current transmit, which will wake up the queue
929 		 */
930 atomic_add(wrb_cnt, &txq->used);
931 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
932 txq->len) {
933 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
934 stopped = true;
935 }
936
937 be_txq_notify(adapter, txo, wrb_cnt);
938
939 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
940 } else {
941 txq->head = start;
942 dev_kfree_skb_any(skb);
943 }
944 tx_drop:
945 return NETDEV_TX_OK;
946 }
947
948 static int be_change_mtu(struct net_device *netdev, int new_mtu)
949 {
950 struct be_adapter *adapter = netdev_priv(netdev);
951 if (new_mtu < BE_MIN_MTU ||
952 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
953 (ETH_HLEN + ETH_FCS_LEN))) {
954 dev_info(&adapter->pdev->dev,
955 "MTU must be between %d and %d bytes\n",
956 BE_MIN_MTU,
957 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
958 return -EINVAL;
959 }
960 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
961 netdev->mtu, new_mtu);
962 netdev->mtu = new_mtu;
963 return 0;
964 }
965
966 /*
967 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
968 * If the user configures more, place BE in vlan promiscuous mode.
969 */
970 static int be_vid_config(struct be_adapter *adapter)
971 {
972 u16 vids[BE_NUM_VLANS_SUPPORTED];
973 u16 num = 0, i;
974 int status = 0;
975
976 /* No need to further configure vids if in promiscuous mode */
977 if (adapter->promiscuous)
978 return 0;
979
980 if (adapter->vlans_added > adapter->max_vlans)
981 goto set_vlan_promisc;
982
983 /* Construct VLAN Table to give to HW */
984 for (i = 0; i < VLAN_N_VID; i++)
985 if (adapter->vlan_tag[i])
986 vids[num++] = cpu_to_le16(i);
987
988 status = be_cmd_vlan_config(adapter, adapter->if_handle,
989 vids, num, 1, 0);
990
991 /* Set to VLAN promisc mode as setting VLAN filter failed */
992 if (status) {
993 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
994 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
995 goto set_vlan_promisc;
996 }
997
998 return status;
999
1000 set_vlan_promisc:
1001 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1002 NULL, 0, 1, 1);
1003 return status;
1004 }
1005
1006 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1007 {
1008 struct be_adapter *adapter = netdev_priv(netdev);
1009 int status = 0;
1010
1011 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1012 status = -EINVAL;
1013 goto ret;
1014 }
1015
1016 /* Packets with VID 0 are always received by Lancer by default */
1017 if (lancer_chip(adapter) && vid == 0)
1018 goto ret;
1019
1020 adapter->vlan_tag[vid] = 1;
1021 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1022 status = be_vid_config(adapter);
1023
1024 if (!status)
1025 adapter->vlans_added++;
1026 else
1027 adapter->vlan_tag[vid] = 0;
1028 ret:
1029 return status;
1030 }
1031
1032 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 {
1034 struct be_adapter *adapter = netdev_priv(netdev);
1035 int status = 0;
1036
1037 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1038 status = -EINVAL;
1039 goto ret;
1040 }
1041
1042 /* Packets with VID 0 are always received by Lancer by default */
1043 if (lancer_chip(adapter) && vid == 0)
1044 goto ret;
1045
1046 adapter->vlan_tag[vid] = 0;
1047 if (adapter->vlans_added <= adapter->max_vlans)
1048 status = be_vid_config(adapter);
1049
1050 if (!status)
1051 adapter->vlans_added--;
1052 else
1053 adapter->vlan_tag[vid] = 1;
1054 ret:
1055 return status;
1056 }
1057
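/* Program the promiscuous, multicast and unicast filters to match the netdev
 * flags and address lists, falling back to promiscuous modes when the HW
 * filter tables are exhausted.
 */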
1058 static void be_set_rx_mode(struct net_device *netdev)
1059 {
1060 struct be_adapter *adapter = netdev_priv(netdev);
1061 int status;
1062
1063 if (netdev->flags & IFF_PROMISC) {
1064 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1065 adapter->promiscuous = true;
1066 goto done;
1067 }
1068
1069 /* BE was previously in promiscuous mode; disable it */
1070 if (adapter->promiscuous) {
1071 adapter->promiscuous = false;
1072 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1073
1074 if (adapter->vlans_added)
1075 be_vid_config(adapter);
1076 }
1077
1078 /* Enable multicast promisc if num configured exceeds what we support */
1079 if (netdev->flags & IFF_ALLMULTI ||
1080 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1081 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1082 goto done;
1083 }
1084
1085 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1086 struct netdev_hw_addr *ha;
1087 int i = 1; /* First slot is claimed by the Primary MAC */
1088
1089 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1090 be_cmd_pmac_del(adapter, adapter->if_handle,
1091 adapter->pmac_id[i], 0);
1092 }
1093
1094 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1095 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1096 adapter->promiscuous = true;
1097 goto done;
1098 }
1099
1100 netdev_for_each_uc_addr(ha, adapter->netdev) {
1101 adapter->uc_macs++; /* First slot is for Primary MAC */
1102 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1103 adapter->if_handle,
1104 &adapter->pmac_id[adapter->uc_macs], 0);
1105 }
1106 }
1107
1108 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1109
1110 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1111 if (status) {
1112 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1113 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1114 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1115 }
1116 done:
1117 return;
1118 }
1119
1120 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1121 {
1122 struct be_adapter *adapter = netdev_priv(netdev);
1123 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1124 int status;
1125 bool active_mac = false;
1126 u32 pmac_id;
1127 u8 old_mac[ETH_ALEN];
1128
1129 if (!sriov_enabled(adapter))
1130 return -EPERM;
1131
1132 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1133 return -EINVAL;
1134
1135 if (lancer_chip(adapter)) {
1136 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1137 &pmac_id, vf + 1);
1138 if (!status && active_mac)
1139 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1140 pmac_id, vf + 1);
1141
1142 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1143 } else {
1144 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1145 vf_cfg->pmac_id, vf + 1);
1146
1147 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1148 &vf_cfg->pmac_id, vf + 1);
1149 }
1150
1151 if (status)
1152 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1153 mac, vf);
1154 else
1155 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1156
1157 return status;
1158 }
1159
1160 static int be_get_vf_config(struct net_device *netdev, int vf,
1161 struct ifla_vf_info *vi)
1162 {
1163 struct be_adapter *adapter = netdev_priv(netdev);
1164 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1165
1166 if (!sriov_enabled(adapter))
1167 return -EPERM;
1168
1169 if (vf >= adapter->num_vfs)
1170 return -EINVAL;
1171
1172 vi->vf = vf;
1173 vi->tx_rate = vf_cfg->tx_rate;
1174 vi->vlan = vf_cfg->vlan_tag;
1175 vi->qos = 0;
1176 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1177
1178 return 0;
1179 }
1180
1181 static int be_set_vf_vlan(struct net_device *netdev,
1182 int vf, u16 vlan, u8 qos)
1183 {
1184 struct be_adapter *adapter = netdev_priv(netdev);
1185 int status = 0;
1186
1187 if (!sriov_enabled(adapter))
1188 return -EPERM;
1189
1190 if (vf >= adapter->num_vfs || vlan > 4095)
1191 return -EINVAL;
1192
1193 if (vlan) {
1194 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1195 			/* If this is a new value, program it; else skip. */
1196 adapter->vf_cfg[vf].vlan_tag = vlan;
1197
1198 status = be_cmd_set_hsw_config(adapter, vlan,
1199 vf + 1, adapter->vf_cfg[vf].if_handle);
1200 }
1201 } else {
1202 /* Reset Transparent Vlan Tagging. */
1203 adapter->vf_cfg[vf].vlan_tag = 0;
1204 vlan = adapter->vf_cfg[vf].def_vid;
1205 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1206 adapter->vf_cfg[vf].if_handle);
1207 }
1208
1210 if (status)
1211 dev_info(&adapter->pdev->dev,
1212 "VLAN %d config on VF %d failed\n", vlan, vf);
1213 return status;
1214 }
1215
1216 static int be_set_vf_tx_rate(struct net_device *netdev,
1217 int vf, int rate)
1218 {
1219 struct be_adapter *adapter = netdev_priv(netdev);
1220 int status = 0;
1221
1222 if (!sriov_enabled(adapter))
1223 return -EPERM;
1224
1225 if (vf >= adapter->num_vfs)
1226 return -EINVAL;
1227
1228 if (rate < 100 || rate > 10000) {
1229 dev_err(&adapter->pdev->dev,
1230 "tx rate must be between 100 and 10000 Mbps\n");
1231 return -EINVAL;
1232 }
1233
1234 if (lancer_chip(adapter))
1235 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1236 else
1237 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1238
1239 if (status)
1240 dev_err(&adapter->pdev->dev,
1241 "tx rate %d on VF %d failed\n", rate, vf);
1242 else
1243 adapter->vf_cfg[vf].tx_rate = rate;
1244 return status;
1245 }
1246
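/* Walk the PCI bus and count this adapter's virtual functions; depending on
 * vf_state, return either all VFs found or only those assigned to a guest.
 */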
1247 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1248 {
1249 struct pci_dev *dev, *pdev = adapter->pdev;
1250 int vfs = 0, assigned_vfs = 0, pos;
1251 u16 offset, stride;
1252
1253 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1254 if (!pos)
1255 return 0;
1256 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1257 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1258
1259 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1260 while (dev) {
1261 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1262 vfs++;
1263 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1264 assigned_vfs++;
1265 }
1266 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1267 }
1268 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1269 }
1270
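/* Adaptive interrupt coalescing: once a second, recompute the EQ delay from
 * the observed rx packet rate and program it via be_cmd_modify_eqd() if it
 * has changed.
 */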
1271 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1272 {
1273 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1274 ulong now = jiffies;
1275 ulong delta = now - stats->rx_jiffies;
1276 u64 pkts;
1277 unsigned int start, eqd;
1278
1279 if (!eqo->enable_aic) {
1280 eqd = eqo->eqd;
1281 goto modify_eqd;
1282 }
1283
1284 if (eqo->idx >= adapter->num_rx_qs)
1285 return;
1286
1287 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1288
1289 /* Wrapped around */
1290 if (time_before(now, stats->rx_jiffies)) {
1291 stats->rx_jiffies = now;
1292 return;
1293 }
1294
1295 /* Update once a second */
1296 if (delta < HZ)
1297 return;
1298
1299 do {
1300 start = u64_stats_fetch_begin_bh(&stats->sync);
1301 pkts = stats->rx_pkts;
1302 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1303
1304 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1305 stats->rx_pkts_prev = pkts;
1306 stats->rx_jiffies = now;
1307 eqd = (stats->rx_pps / 110000) << 3;
1308 eqd = min(eqd, eqo->max_eqd);
1309 eqd = max(eqd, eqo->min_eqd);
1310 if (eqd < 10)
1311 eqd = 0;
1312
1313 modify_eqd:
1314 if (eqd != eqo->cur_eqd) {
1315 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1316 eqo->cur_eqd = eqd;
1317 }
1318 }
1319
1320 static void be_rx_stats_update(struct be_rx_obj *rxo,
1321 struct be_rx_compl_info *rxcp)
1322 {
1323 struct be_rx_stats *stats = rx_stats(rxo);
1324
1325 u64_stats_update_begin(&stats->sync);
1326 stats->rx_compl++;
1327 stats->rx_bytes += rxcp->pkt_size;
1328 stats->rx_pkts++;
1329 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1330 stats->rx_mcast_pkts++;
1331 if (rxcp->err)
1332 stats->rx_compl_err++;
1333 u64_stats_update_end(&stats->sync);
1334 }
1335
1336 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1337 {
1338 /* L4 checksum is not reliable for non TCP/UDP packets.
1339 * Also ignore ipcksm for ipv6 pkts */
1340 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1341 (rxcp->ip_csum || rxcp->ipv6);
1342 }
1343
1344 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1345 u16 frag_idx)
1346 {
1347 struct be_adapter *adapter = rxo->adapter;
1348 struct be_rx_page_info *rx_page_info;
1349 struct be_queue_info *rxq = &rxo->q;
1350
1351 rx_page_info = &rxo->page_info_tbl[frag_idx];
1352 BUG_ON(!rx_page_info->page);
1353
1354 if (rx_page_info->last_page_user) {
1355 dma_unmap_page(&adapter->pdev->dev,
1356 dma_unmap_addr(rx_page_info, bus),
1357 adapter->big_page_size, DMA_FROM_DEVICE);
1358 rx_page_info->last_page_user = false;
1359 }
1360
1361 atomic_dec(&rxq->used);
1362 return rx_page_info;
1363 }
1364
1365 /* Throw away the data in the Rx completion */
1366 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1367 struct be_rx_compl_info *rxcp)
1368 {
1369 struct be_queue_info *rxq = &rxo->q;
1370 struct be_rx_page_info *page_info;
1371 u16 i, num_rcvd = rxcp->num_rcvd;
1372
1373 for (i = 0; i < num_rcvd; i++) {
1374 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1375 put_page(page_info->page);
1376 memset(page_info, 0, sizeof(*page_info));
1377 index_inc(&rxcp->rxq_idx, rxq->len);
1378 }
1379 }
1380
1381 /*
1382 * skb_fill_rx_data forms a complete skb for an ether frame
1383 * indicated by rxcp.
1384 */
1385 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1386 struct be_rx_compl_info *rxcp)
1387 {
1388 struct be_queue_info *rxq = &rxo->q;
1389 struct be_rx_page_info *page_info;
1390 u16 i, j;
1391 u16 hdr_len, curr_frag_len, remaining;
1392 u8 *start;
1393
1394 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1395 start = page_address(page_info->page) + page_info->page_offset;
1396 prefetch(start);
1397
1398 /* Copy data in the first descriptor of this completion */
1399 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1400
1401 skb->len = curr_frag_len;
1402 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1403 memcpy(skb->data, start, curr_frag_len);
1404 /* Complete packet has now been moved to data */
1405 put_page(page_info->page);
1406 skb->data_len = 0;
1407 skb->tail += curr_frag_len;
1408 } else {
1409 hdr_len = ETH_HLEN;
1410 memcpy(skb->data, start, hdr_len);
1411 skb_shinfo(skb)->nr_frags = 1;
1412 skb_frag_set_page(skb, 0, page_info->page);
1413 skb_shinfo(skb)->frags[0].page_offset =
1414 page_info->page_offset + hdr_len;
1415 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1416 skb->data_len = curr_frag_len - hdr_len;
1417 skb->truesize += rx_frag_size;
1418 skb->tail += hdr_len;
1419 }
1420 page_info->page = NULL;
1421
1422 if (rxcp->pkt_size <= rx_frag_size) {
1423 BUG_ON(rxcp->num_rcvd != 1);
1424 return;
1425 }
1426
1427 /* More frags present for this completion */
1428 index_inc(&rxcp->rxq_idx, rxq->len);
1429 remaining = rxcp->pkt_size - curr_frag_len;
1430 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1431 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1432 curr_frag_len = min(remaining, rx_frag_size);
1433
1434 /* Coalesce all frags from the same physical page in one slot */
1435 if (page_info->page_offset == 0) {
1436 /* Fresh page */
1437 j++;
1438 skb_frag_set_page(skb, j, page_info->page);
1439 skb_shinfo(skb)->frags[j].page_offset =
1440 page_info->page_offset;
1441 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1442 skb_shinfo(skb)->nr_frags++;
1443 } else {
1444 put_page(page_info->page);
1445 }
1446
1447 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1448 skb->len += curr_frag_len;
1449 skb->data_len += curr_frag_len;
1450 skb->truesize += rx_frag_size;
1451 remaining -= curr_frag_len;
1452 index_inc(&rxcp->rxq_idx, rxq->len);
1453 page_info->page = NULL;
1454 }
1455 BUG_ON(j > MAX_SKB_FRAGS);
1456 }
1457
1458 /* Process the RX completion indicated by rxcp when GRO is disabled */
1459 static void be_rx_compl_process(struct be_rx_obj *rxo,
1460 struct be_rx_compl_info *rxcp)
1461 {
1462 struct be_adapter *adapter = rxo->adapter;
1463 struct net_device *netdev = adapter->netdev;
1464 struct sk_buff *skb;
1465
1466 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1467 if (unlikely(!skb)) {
1468 rx_stats(rxo)->rx_drops_no_skbs++;
1469 be_rx_compl_discard(rxo, rxcp);
1470 return;
1471 }
1472
1473 skb_fill_rx_data(rxo, skb, rxcp);
1474
1475 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1476 skb->ip_summed = CHECKSUM_UNNECESSARY;
1477 else
1478 skb_checksum_none_assert(skb);
1479
1480 skb->protocol = eth_type_trans(skb, netdev);
1481 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1482 if (netdev->features & NETIF_F_RXHASH)
1483 skb->rxhash = rxcp->rss_hash;
1484
1485
1486 if (rxcp->vlanf)
1487 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1488
1489 netif_receive_skb(skb);
1490 }
1491
1492 /* Process the RX completion indicated by rxcp when GRO is enabled */
1493 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1494 struct be_rx_compl_info *rxcp)
1495 {
1496 struct be_adapter *adapter = rxo->adapter;
1497 struct be_rx_page_info *page_info;
1498 struct sk_buff *skb = NULL;
1499 struct be_queue_info *rxq = &rxo->q;
1500 u16 remaining, curr_frag_len;
1501 u16 i, j;
1502
1503 skb = napi_get_frags(napi);
1504 if (!skb) {
1505 be_rx_compl_discard(rxo, rxcp);
1506 return;
1507 }
1508
1509 remaining = rxcp->pkt_size;
1510 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1511 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1512
1513 curr_frag_len = min(remaining, rx_frag_size);
1514
1515 /* Coalesce all frags from the same physical page in one slot */
1516 if (i == 0 || page_info->page_offset == 0) {
1517 /* First frag or Fresh page */
1518 j++;
1519 skb_frag_set_page(skb, j, page_info->page);
1520 skb_shinfo(skb)->frags[j].page_offset =
1521 page_info->page_offset;
1522 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1523 } else {
1524 put_page(page_info->page);
1525 }
1526 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1527 skb->truesize += rx_frag_size;
1528 remaining -= curr_frag_len;
1529 index_inc(&rxcp->rxq_idx, rxq->len);
1530 memset(page_info, 0, sizeof(*page_info));
1531 }
1532 BUG_ON(j > MAX_SKB_FRAGS);
1533
1534 skb_shinfo(skb)->nr_frags = j + 1;
1535 skb->len = rxcp->pkt_size;
1536 skb->data_len = rxcp->pkt_size;
1537 skb->ip_summed = CHECKSUM_UNNECESSARY;
1538 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1539 if (adapter->netdev->features & NETIF_F_RXHASH)
1540 skb->rxhash = rxcp->rss_hash;
1541
1542 if (rxcp->vlanf)
1543 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1544
1545 napi_gro_frags(napi);
1546 }
1547
1548 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549 struct be_rx_compl_info *rxcp)
1550 {
1551 rxcp->pkt_size =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1556 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1557 rxcp->ip_csum =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559 rxcp->l4_csum =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561 rxcp->ipv6 =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563 rxcp->rxq_idx =
1564 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565 rxcp->num_rcvd =
1566 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567 rxcp->pkt_type =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1569 rxcp->rss_hash =
1570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1571 if (rxcp->vlanf) {
1572 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1573 compl);
1574 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575 compl);
1576 }
1577 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1578 }
1579
1580 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581 struct be_rx_compl_info *rxcp)
1582 {
1583 rxcp->pkt_size =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1588 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1589 rxcp->ip_csum =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591 rxcp->l4_csum =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593 rxcp->ipv6 =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595 rxcp->rxq_idx =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597 rxcp->num_rcvd =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599 rxcp->pkt_type =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1601 rxcp->rss_hash =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1603 if (rxcp->vlanf) {
1604 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1605 compl);
1606 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607 compl);
1608 }
1609 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1610 }
1611
1612 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1613 {
1614 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1615 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1616 struct be_adapter *adapter = rxo->adapter;
1617
1618 	/* For checking the valid bit it is OK to use either definition, as the
1619 * valid bit is at the same position in both v0 and v1 Rx compl */
1620 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1621 return NULL;
1622
1623 rmb();
1624 be_dws_le_to_cpu(compl, sizeof(*compl));
1625
1626 if (adapter->be3_native)
1627 be_parse_rx_compl_v1(compl, rxcp);
1628 else
1629 be_parse_rx_compl_v0(compl, rxcp);
1630
1631 if (rxcp->vlanf) {
1632 /* vlanf could be wrongly set in some cards.
1633 * ignore if vtm is not set */
1634 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1635 rxcp->vlanf = 0;
1636
1637 if (!lancer_chip(adapter))
1638 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1639
1640 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1641 !adapter->vlan_tag[rxcp->vlan_tag])
1642 rxcp->vlanf = 0;
1643 }
1644
1645 	/* As the compl has been parsed, reset it; we won't touch it again */
1646 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1647
1648 queue_tail_inc(&rxo->cq);
1649 return rxcp;
1650 }
1651
1652 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1653 {
1654 u32 order = get_order(size);
1655
1656 if (order > 0)
1657 gfp |= __GFP_COMP;
1658 return alloc_pages(gfp, order);
1659 }
1660
1661 /*
1662  * Allocate a page, split it into fragments of size rx_frag_size and post
1663  * them as receive buffers to BE
1664 */
1665 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1666 {
1667 struct be_adapter *adapter = rxo->adapter;
1668 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1669 struct be_queue_info *rxq = &rxo->q;
1670 struct page *pagep = NULL;
1671 struct be_eth_rx_d *rxd;
1672 u64 page_dmaaddr = 0, frag_dmaaddr;
1673 u32 posted, page_offset = 0;
1674
1675 page_info = &rxo->page_info_tbl[rxq->head];
1676 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1677 if (!pagep) {
1678 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1679 if (unlikely(!pagep)) {
1680 rx_stats(rxo)->rx_post_fail++;
1681 break;
1682 }
1683 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1684 0, adapter->big_page_size,
1685 DMA_FROM_DEVICE);
1686 page_info->page_offset = 0;
1687 } else {
1688 get_page(pagep);
1689 page_info->page_offset = page_offset + rx_frag_size;
1690 }
1691 page_offset = page_info->page_offset;
1692 page_info->page = pagep;
1693 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1694 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1695
1696 rxd = queue_head_node(rxq);
1697 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1698 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1699
1700 /* Any space left in the current big page for another frag? */
1701 if ((page_offset + rx_frag_size + rx_frag_size) >
1702 adapter->big_page_size) {
1703 pagep = NULL;
1704 page_info->last_page_user = true;
1705 }
1706
1707 prev_page_info = page_info;
1708 queue_head_inc(rxq);
1709 page_info = &rxo->page_info_tbl[rxq->head];
1710 }
1711 if (pagep)
1712 prev_page_info->last_page_user = true;
1713
1714 if (posted) {
1715 atomic_add(posted, &rxq->used);
1716 be_rxq_notify(adapter, rxq->id, posted);
1717 } else if (atomic_read(&rxq->used) == 0) {
1718 /* Let be_worker replenish when memory is available */
1719 rxo->rx_post_starved = true;
1720 }
1721 }
1722
1723 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1724 {
1725 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1726
1727 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1728 return NULL;
1729
1730 rmb();
1731 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1732
1733 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1734
1735 queue_tail_inc(tx_cq);
1736 return txcp;
1737 }
1738
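/* Unmap and free the skb for a completed transmit, walking its WRBs from the
 * queue tail up to last_index; returns the number of WRBs consumed.
 */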
1739 static u16 be_tx_compl_process(struct be_adapter *adapter,
1740 struct be_tx_obj *txo, u16 last_index)
1741 {
1742 struct be_queue_info *txq = &txo->q;
1743 struct be_eth_wrb *wrb;
1744 struct sk_buff **sent_skbs = txo->sent_skb_list;
1745 struct sk_buff *sent_skb;
1746 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1747 bool unmap_skb_hdr = true;
1748
1749 sent_skb = sent_skbs[txq->tail];
1750 BUG_ON(!sent_skb);
1751 sent_skbs[txq->tail] = NULL;
1752
1753 /* skip header wrb */
1754 queue_tail_inc(txq);
1755
1756 do {
1757 cur_index = txq->tail;
1758 wrb = queue_tail_node(txq);
1759 unmap_tx_frag(&adapter->pdev->dev, wrb,
1760 (unmap_skb_hdr && skb_headlen(sent_skb)));
1761 unmap_skb_hdr = false;
1762
1763 num_wrbs++;
1764 queue_tail_inc(txq);
1765 } while (cur_index != last_index);
1766
1767 kfree_skb(sent_skb);
1768 return num_wrbs;
1769 }
1770
1771 /* Return the number of events in the event queue */
1772 static inline int events_get(struct be_eq_obj *eqo)
1773 {
1774 struct be_eq_entry *eqe;
1775 int num = 0;
1776
1777 do {
1778 eqe = queue_tail_node(&eqo->q);
1779 if (eqe->evt == 0)
1780 break;
1781
1782 rmb();
1783 eqe->evt = 0;
1784 num++;
1785 queue_tail_inc(&eqo->q);
1786 } while (true);
1787
1788 return num;
1789 }
1790
1791 /* Leaves the EQ in a disarmed state */
1792 static void be_eq_clean(struct be_eq_obj *eqo)
1793 {
1794 int num = events_get(eqo);
1795
1796 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1797 }
1798
1799 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1800 {
1801 struct be_rx_page_info *page_info;
1802 struct be_queue_info *rxq = &rxo->q;
1803 struct be_queue_info *rx_cq = &rxo->cq;
1804 struct be_rx_compl_info *rxcp;
1805 struct be_adapter *adapter = rxo->adapter;
1806 int flush_wait = 0;
1807 u16 tail;
1808
1809 /* Consume pending rx completions.
1810 * Wait for the flush completion (identified by zero num_rcvd)
1811 * to arrive. Notify CQ even when there are no more CQ entries
1812 * for HW to flush partially coalesced CQ entries.
1813 * In Lancer, there is no need to wait for flush compl.
1814 */
1815 for (;;) {
1816 rxcp = be_rx_compl_get(rxo);
1817 if (rxcp == NULL) {
1818 if (lancer_chip(adapter))
1819 break;
1820
1821 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1822 dev_warn(&adapter->pdev->dev,
1823 "did not receive flush compl\n");
1824 break;
1825 }
1826 be_cq_notify(adapter, rx_cq->id, true, 0);
1827 mdelay(1);
1828 } else {
1829 be_rx_compl_discard(rxo, rxcp);
1830 be_cq_notify(adapter, rx_cq->id, false, 1);
1831 if (rxcp->num_rcvd == 0)
1832 break;
1833 }
1834 }
1835
1836 /* After cleanup, leave the CQ in unarmed state */
1837 be_cq_notify(adapter, rx_cq->id, false, 0);
1838
1839 /* Then free posted rx buffers that were not used */
1840 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1841 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1842 page_info = get_rx_page_info(rxo, tail);
1843 put_page(page_info->page);
1844 memset(page_info, 0, sizeof(*page_info));
1845 }
1846 BUG_ON(atomic_read(&rxq->used));
1847 rxq->tail = rxq->head = 0;
1848 }
1849
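/* Polls every TX CQ for up to ~200ms so that pending completions are
 * processed; any skbs still posted after the timeout (whose completions
 * will never arrive) are then unmapped and freed directly.
 */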
1850 static void be_tx_compl_clean(struct be_adapter *adapter)
1851 {
1852 struct be_tx_obj *txo;
1853 struct be_queue_info *txq;
1854 struct be_eth_tx_compl *txcp;
1855 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1856 struct sk_buff *sent_skb;
1857 bool dummy_wrb;
1858 int i, pending_txqs;
1859
1860 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1861 do {
1862 pending_txqs = adapter->num_tx_qs;
1863
1864 for_all_tx_queues(adapter, txo, i) {
1865 txq = &txo->q;
1866 while ((txcp = be_tx_compl_get(&txo->cq))) {
1867 end_idx =
1868 AMAP_GET_BITS(struct amap_eth_tx_compl,
1869 wrb_index, txcp);
1870 num_wrbs += be_tx_compl_process(adapter, txo,
1871 end_idx);
1872 cmpl++;
1873 }
1874 if (cmpl) {
1875 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1876 atomic_sub(num_wrbs, &txq->used);
1877 cmpl = 0;
1878 num_wrbs = 0;
1879 }
1880 if (atomic_read(&txq->used) == 0)
1881 pending_txqs--;
1882 }
1883
1884 if (pending_txqs == 0 || ++timeo > 200)
1885 break;
1886
1887 mdelay(1);
1888 } while (true);
1889
1890 for_all_tx_queues(adapter, txo, i) {
1891 txq = &txo->q;
1892 if (atomic_read(&txq->used))
1893 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1894 atomic_read(&txq->used));
1895
1896 /* free posted tx for which compls will never arrive */
1897 while (atomic_read(&txq->used)) {
1898 sent_skb = txo->sent_skb_list[txq->tail];
1899 end_idx = txq->tail;
1900 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1901 &dummy_wrb);
1902 index_adv(&end_idx, num_wrbs - 1, txq->len);
1903 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1904 atomic_sub(num_wrbs, &txq->used);
1905 }
1906 }
1907 }
1908
1909 static void be_evt_queues_destroy(struct be_adapter *adapter)
1910 {
1911 struct be_eq_obj *eqo;
1912 int i;
1913
1914 for_all_evt_queues(adapter, eqo, i) {
1915 if (eqo->q.created) {
1916 be_eq_clean(eqo);
1917 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1918 }
1919 be_queue_free(adapter, &eqo->q);
1920 }
1921 }
1922
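/* Allocates and creates one EQ per interrupt vector, with adaptive
 * interrupt coalescing (AIC) enabled on each.
 */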
1923 static int be_evt_queues_create(struct be_adapter *adapter)
1924 {
1925 struct be_queue_info *eq;
1926 struct be_eq_obj *eqo;
1927 int i, rc;
1928
1929 adapter->num_evt_qs = num_irqs(adapter);
1930
1931 for_all_evt_queues(adapter, eqo, i) {
1932 eqo->adapter = adapter;
1933 eqo->tx_budget = BE_TX_BUDGET;
1934 eqo->idx = i;
1935 eqo->max_eqd = BE_MAX_EQD;
1936 eqo->enable_aic = true;
1937
1938 eq = &eqo->q;
1939 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1940 sizeof(struct be_eq_entry));
1941 if (rc)
1942 return rc;
1943
1944 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1945 if (rc)
1946 return rc;
1947 }
1948 return 0;
1949 }
1950
1951 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1952 {
1953 struct be_queue_info *q;
1954
1955 q = &adapter->mcc_obj.q;
1956 if (q->created)
1957 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1958 be_queue_free(adapter, q);
1959
1960 q = &adapter->mcc_obj.cq;
1961 if (q->created)
1962 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1963 be_queue_free(adapter, q);
1964 }
1965
1966 /* Must be called only after TX qs are created as MCC shares TX EQ */
1967 static int be_mcc_queues_create(struct be_adapter *adapter)
1968 {
1969 struct be_queue_info *q, *cq;
1970
1971 cq = &adapter->mcc_obj.cq;
1972 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1973 sizeof(struct be_mcc_compl)))
1974 goto err;
1975
1976 /* Use the default EQ for MCC completions */
1977 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1978 goto mcc_cq_free;
1979
1980 q = &adapter->mcc_obj.q;
1981 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1982 goto mcc_cq_destroy;
1983
1984 if (be_cmd_mccq_create(adapter, q, cq))
1985 goto mcc_q_free;
1986
1987 return 0;
1988
1989 mcc_q_free:
1990 be_queue_free(adapter, q);
1991 mcc_cq_destroy:
1992 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1993 mcc_cq_free:
1994 be_queue_free(adapter, cq);
1995 err:
1996 return -1;
1997 }
1998
1999 static void be_tx_queues_destroy(struct be_adapter *adapter)
2000 {
2001 struct be_queue_info *q;
2002 struct be_tx_obj *txo;
2003 u8 i;
2004
2005 for_all_tx_queues(adapter, txo, i) {
2006 q = &txo->q;
2007 if (q->created)
2008 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2009 be_queue_free(adapter, q);
2010
2011 q = &txo->cq;
2012 if (q->created)
2013 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2014 be_queue_free(adapter, q);
2015 }
2016 }
2017
2018 static int be_num_txqs_want(struct be_adapter *adapter)
2019 {
2020 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2021 be_is_mc(adapter) ||
2022 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2023 BE2_chip(adapter))
2024 return 1;
2025 else
2026 return adapter->max_tx_queues;
2027 }
2028
2029 static int be_tx_cqs_create(struct be_adapter *adapter)
2030 {
2031 struct be_queue_info *cq, *eq;
2032 int status;
2033 struct be_tx_obj *txo;
2034 u8 i;
2035
2036 adapter->num_tx_qs = be_num_txqs_want(adapter);
2037 if (adapter->num_tx_qs != MAX_TX_QS) {
2038 rtnl_lock();
2039 netif_set_real_num_tx_queues(adapter->netdev,
2040 adapter->num_tx_qs);
2041 rtnl_unlock();
2042 }
2043
2044 for_all_tx_queues(adapter, txo, i) {
2045 cq = &txo->cq;
2046 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2047 sizeof(struct be_eth_tx_compl));
2048 if (status)
2049 return status;
2050
2051 /* If num_evt_qs is less than num_tx_qs, then more than
2052 * one txq shares an eq
2053 */
2054 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2055 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2056 if (status)
2057 return status;
2058 }
2059 return 0;
2060 }
2061
2062 static int be_tx_qs_create(struct be_adapter *adapter)
2063 {
2064 struct be_tx_obj *txo;
2065 int i, status;
2066
2067 for_all_tx_queues(adapter, txo, i) {
2068 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2069 sizeof(struct be_eth_wrb));
2070 if (status)
2071 return status;
2072
2073 status = be_cmd_txq_create(adapter, txo);
2074 if (status)
2075 return status;
2076 }
2077
2078 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2079 adapter->num_tx_qs);
2080 return 0;
2081 }
2082
2083 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2084 {
2085 struct be_queue_info *q;
2086 struct be_rx_obj *rxo;
2087 int i;
2088
2089 for_all_rx_queues(adapter, rxo, i) {
2090 q = &rxo->cq;
2091 if (q->created)
2092 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2093 be_queue_free(adapter, q);
2094 }
2095 }
2096
2097 static int be_rx_cqs_create(struct be_adapter *adapter)
2098 {
2099 struct be_queue_info *eq, *cq;
2100 struct be_rx_obj *rxo;
2101 int rc, i;
2102
2103 /* We'll create as many RSS rings as there are irqs.
2104 * But when there's only one irq there's no use creating RSS rings
2105 */
2106 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2107 num_irqs(adapter) + 1 : 1;
2108 if (adapter->num_rx_qs != MAX_RX_QS) {
2109 rtnl_lock();
2110 netif_set_real_num_rx_queues(adapter->netdev,
2111 adapter->num_rx_qs);
2112 rtnl_unlock();
2113 }
2114
2115 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2116 for_all_rx_queues(adapter, rxo, i) {
2117 rxo->adapter = adapter;
2118 cq = &rxo->cq;
2119 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2120 sizeof(struct be_eth_rx_compl));
2121 if (rc)
2122 return rc;
2123
2124 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2125 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2126 if (rc)
2127 return rc;
2128 }
2129
2130 dev_info(&adapter->pdev->dev,
2131 "created %d RSS queue(s) and 1 default RX queue\n",
2132 adapter->num_rx_qs - 1);
2133 return 0;
2134 }
2135
2136 static irqreturn_t be_intx(int irq, void *dev)
2137 {
2138 struct be_eq_obj *eqo = dev;
2139 struct be_adapter *adapter = eqo->adapter;
2140 int num_evts = 0;
2141
2142 /* IRQ is not expected when NAPI is scheduled as the EQ
2143 * will not be armed.
2144 * But, this can happen on Lancer INTx where it takes
2145 * a while to de-assert INTx or in BE2 where occasionally
2146 * an interrupt may be raised even when EQ is unarmed.
2147 * If NAPI is already scheduled, then counting & notifying
2148 * events will orphan them.
2149 */
2150 if (napi_schedule_prep(&eqo->napi)) {
2151 num_evts = events_get(eqo);
2152 __napi_schedule(&eqo->napi);
2153 if (num_evts)
2154 eqo->spurious_intr = 0;
2155 }
2156 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2157
2158 /* Return IRQ_HANDLED only for the first spurious intr
2159 * after a valid intr to stop the kernel from branding
2160 * this irq as a bad one!
2161 */
2162 if (num_evts || eqo->spurious_intr++ == 0)
2163 return IRQ_HANDLED;
2164 else
2165 return IRQ_NONE;
2166 }
2167
2168 static irqreturn_t be_msix(int irq, void *dev)
2169 {
2170 struct be_eq_obj *eqo = dev;
2171
2172 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2173 napi_schedule(&eqo->napi);
2174 return IRQ_HANDLED;
2175 }
2176
2177 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2178 {
2179 return (rxcp->tcpf && !rxcp->err) ? true : false;
2180 }
2181
2182 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2183 int budget)
2184 {
2185 struct be_adapter *adapter = rxo->adapter;
2186 struct be_queue_info *rx_cq = &rxo->cq;
2187 struct be_rx_compl_info *rxcp;
2188 u32 work_done;
2189
2190 for (work_done = 0; work_done < budget; work_done++) {
2191 rxcp = be_rx_compl_get(rxo);
2192 if (!rxcp)
2193 break;
2194
2195 /* Is it a flush compl that has no data */
2196 if (unlikely(rxcp->num_rcvd == 0))
2197 goto loop_continue;
2198
2199 /* Discard compl with partial DMA (Lancer B0) */
2200 if (unlikely(!rxcp->pkt_size)) {
2201 be_rx_compl_discard(rxo, rxcp);
2202 goto loop_continue;
2203 }
2204
2205 /* On BE drop pkts that arrive due to imperfect filtering in
2206 * promiscuous mode on some SKUs
2207 */
2208 if (unlikely(rxcp->port != adapter->port_num &&
2209 !lancer_chip(adapter))) {
2210 be_rx_compl_discard(rxo, rxcp);
2211 goto loop_continue;
2212 }
2213
2214 if (do_gro(rxcp))
2215 be_rx_compl_process_gro(rxo, napi, rxcp);
2216 else
2217 be_rx_compl_process(rxo, rxcp);
2218 loop_continue:
2219 be_rx_stats_update(rxo, rxcp);
2220 }
2221
2222 if (work_done) {
2223 be_cq_notify(adapter, rx_cq->id, true, work_done);
2224
2225 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2226 be_post_rx_frags(rxo, GFP_ATOMIC);
2227 }
2228
2229 return work_done;
2230 }
2231
2232 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2233 int budget, int idx)
2234 {
2235 struct be_eth_tx_compl *txcp;
2236 int num_wrbs = 0, work_done;
2237
2238 for (work_done = 0; work_done < budget; work_done++) {
2239 txcp = be_tx_compl_get(&txo->cq);
2240 if (!txcp)
2241 break;
2242 num_wrbs += be_tx_compl_process(adapter, txo,
2243 AMAP_GET_BITS(struct amap_eth_tx_compl,
2244 wrb_index, txcp));
2245 }
2246
2247 if (work_done) {
2248 be_cq_notify(adapter, txo->cq.id, true, work_done);
2249 atomic_sub(num_wrbs, &txo->q.used);
2250
2251 /* As Tx wrbs have been freed up, wake up netdev queue
2252 * if it was stopped due to lack of tx wrbs. */
2253 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2254 atomic_read(&txo->q.used) < txo->q.len / 2) {
2255 netif_wake_subqueue(adapter->netdev, idx);
2256 }
2257
2258 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2259 tx_stats(txo)->tx_compl += work_done;
2260 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2261 }
2262 return (work_done < budget); /* Done */
2263 }
2264
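/* NAPI poll handler: services every TX and RX queue mapped to this EQ,
 * handles MCC completions on the MCC EQ, and re-arms the EQ only when
 * the budget was not exhausted.
 */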
2265 int be_poll(struct napi_struct *napi, int budget)
2266 {
2267 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2268 struct be_adapter *adapter = eqo->adapter;
2269 int max_work = 0, work, i, num_evts;
2270 bool tx_done;
2271
2272 num_evts = events_get(eqo);
2273
2274 /* Process all TXQs serviced by this EQ */
2275 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2276 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2277 eqo->tx_budget, i);
2278 if (!tx_done)
2279 max_work = budget;
2280 }
2281
2282 /* This loop will iterate twice for EQ0 in which
2283 * completions of the last RXQ (default one) are also processed.
2284 * For other EQs the loop iterates only once.
2285 */
2286 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2287 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2288 max_work = max(work, max_work);
2289 }
2290
2291 if (is_mcc_eqo(eqo))
2292 be_process_mcc(adapter);
2293
2294 if (max_work < budget) {
2295 napi_complete(napi);
2296 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2297 } else {
2298 /* As we'll continue in polling mode, count and clear events */
2299 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2300 }
2301 return max_work;
2302 }
2303
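/* Checks for unrecoverable errors: on Lancer the SLIPORT status/error
 * registers are read; on BE2/BE3 the UE status registers are read and
 * masked. Any error bits found are logged.
 */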
2304 void be_detect_error(struct be_adapter *adapter)
2305 {
2306 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2307 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2308 u32 i;
2309
2310 if (be_hw_error(adapter))
2311 return;
2312
2313 if (lancer_chip(adapter)) {
2314 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2315 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2316 sliport_err1 = ioread32(adapter->db +
2317 SLIPORT_ERROR1_OFFSET);
2318 sliport_err2 = ioread32(adapter->db +
2319 SLIPORT_ERROR2_OFFSET);
2320 }
2321 } else {
2322 pci_read_config_dword(adapter->pdev,
2323 PCICFG_UE_STATUS_LOW, &ue_lo);
2324 pci_read_config_dword(adapter->pdev,
2325 PCICFG_UE_STATUS_HIGH, &ue_hi);
2326 pci_read_config_dword(adapter->pdev,
2327 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2330
2331 ue_lo = (ue_lo & ~ue_lo_mask);
2332 ue_hi = (ue_hi & ~ue_hi_mask);
2333 }
2334
2335 /* On certain platforms BE hardware can indicate spurious UEs.
2336 * A real UE will anyway halt the h/w completely, so the hw_error
2337 * flag is not set here based on UE detection.
2338 */
2339 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2340 adapter->hw_error = true;
2341 dev_err(&adapter->pdev->dev,
2342 "Error detected in the card\n");
2343 }
2344
2345 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2346 dev_err(&adapter->pdev->dev,
2347 "ERR: sliport status 0x%x\n", sliport_status);
2348 dev_err(&adapter->pdev->dev,
2349 "ERR: sliport error1 0x%x\n", sliport_err1);
2350 dev_err(&adapter->pdev->dev,
2351 "ERR: sliport error2 0x%x\n", sliport_err2);
2352 }
2353
2354 if (ue_lo) {
2355 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2356 if (ue_lo & 1)
2357 dev_err(&adapter->pdev->dev,
2358 "UE: %s bit set\n", ue_status_low_desc[i]);
2359 }
2360 }
2361
2362 if (ue_hi) {
2363 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2364 if (ue_hi & 1)
2365 dev_err(&adapter->pdev->dev,
2366 "UE: %s bit set\n", ue_status_hi_desc[i]);
2367 }
2368 }
2369
2370 }
2371
2372 static void be_msix_disable(struct be_adapter *adapter)
2373 {
2374 if (msix_enabled(adapter)) {
2375 pci_disable_msix(adapter->pdev);
2376 adapter->num_msix_vec = 0;
2377 }
2378 }
2379
2380 static uint be_num_rss_want(struct be_adapter *adapter)
2381 {
2382 u32 num = 0;
2383
2384 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2385 (lancer_chip(adapter) ||
2386 (!sriov_want(adapter) && be_physfn(adapter)))) {
2387 num = adapter->max_rss_queues;
2388 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2389 }
2390 return num;
2391 }
2392
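/* Requests one MSI-X vector per desired RSS queue (plus RoCE vectors
 * when supported). If the full count cannot be granted, retries with
 * the number of vectors the PCI core reports as available.
 */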
2393 static int be_msix_enable(struct be_adapter *adapter)
2394 {
2395 #define BE_MIN_MSIX_VECTORS 1
2396 int i, status, num_vec, num_roce_vec = 0;
2397 struct device *dev = &adapter->pdev->dev;
2398
2399 /* If RSS queues are not used, need a vec for default RX Q */
2400 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2401 if (be_roce_supported(adapter)) {
2402 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2403 (num_online_cpus() + 1));
2404 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2405 num_vec += num_roce_vec;
2406 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2407 }
2408 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2409
2410 for (i = 0; i < num_vec; i++)
2411 adapter->msix_entries[i].entry = i;
2412
2413 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2414 if (status == 0) {
2415 goto done;
2416 } else if (status >= BE_MIN_MSIX_VECTORS) {
2417 num_vec = status;
2418 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2419 num_vec);
2420 if (!status)
2421 goto done;
2422 }
2423
2424 dev_warn(dev, "MSIx enable failed\n");
2425 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2426 if (!be_physfn(adapter))
2427 return status;
2428 return 0;
2429 done:
2430 if (be_roce_supported(adapter)) {
2431 if (num_vec > num_roce_vec) {
2432 adapter->num_msix_vec = num_vec - num_roce_vec;
2433 adapter->num_msix_roce_vec =
2434 num_vec - adapter->num_msix_vec;
2435 } else {
2436 adapter->num_msix_vec = num_vec;
2437 adapter->num_msix_roce_vec = 0;
2438 }
2439 } else
2440 adapter->num_msix_vec = num_vec;
2441 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2442 return 0;
2443 }
2444
2445 static inline int be_msix_vec_get(struct be_adapter *adapter,
2446 struct be_eq_obj *eqo)
2447 {
2448 return adapter->msix_entries[eqo->idx].vector;
2449 }
2450
2451 static int be_msix_register(struct be_adapter *adapter)
2452 {
2453 struct net_device *netdev = adapter->netdev;
2454 struct be_eq_obj *eqo;
2455 int status, i, vec;
2456
2457 for_all_evt_queues(adapter, eqo, i) {
2458 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2459 vec = be_msix_vec_get(adapter, eqo);
2460 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2461 if (status)
2462 goto err_msix;
2463 }
2464
2465 return 0;
2466 err_msix:
2467 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2468 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2469 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2470 status);
2471 be_msix_disable(adapter);
2472 return status;
2473 }
2474
2475 static int be_irq_register(struct be_adapter *adapter)
2476 {
2477 struct net_device *netdev = adapter->netdev;
2478 int status;
2479
2480 if (msix_enabled(adapter)) {
2481 status = be_msix_register(adapter);
2482 if (status == 0)
2483 goto done;
2484 /* INTx is not supported for VF */
2485 if (!be_physfn(adapter))
2486 return status;
2487 }
2488
2489 /* INTx: only the first EQ is used */
2490 netdev->irq = adapter->pdev->irq;
2491 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2492 &adapter->eq_obj[0]);
2493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "INTx request IRQ failed - err %d\n", status);
2496 return status;
2497 }
2498 done:
2499 adapter->isr_registered = true;
2500 return 0;
2501 }
2502
2503 static void be_irq_unregister(struct be_adapter *adapter)
2504 {
2505 struct net_device *netdev = adapter->netdev;
2506 struct be_eq_obj *eqo;
2507 int i;
2508
2509 if (!adapter->isr_registered)
2510 return;
2511
2512 /* INTx */
2513 if (!msix_enabled(adapter)) {
2514 free_irq(netdev->irq, &adapter->eq_obj[0]);
2515 goto done;
2516 }
2517
2518 /* MSIx */
2519 for_all_evt_queues(adapter, eqo, i)
2520 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2521
2522 done:
2523 adapter->isr_registered = false;
2524 }
2525
2526 static void be_rx_qs_destroy(struct be_adapter *adapter)
2527 {
2528 struct be_queue_info *q;
2529 struct be_rx_obj *rxo;
2530 int i;
2531
2532 for_all_rx_queues(adapter, rxo, i) {
2533 q = &rxo->q;
2534 if (q->created) {
2535 be_cmd_rxq_destroy(adapter, q);
2536 be_rx_cq_clean(rxo);
2537 }
2538 be_queue_free(adapter, q);
2539 }
2540 }
2541
2542 static int be_close(struct net_device *netdev)
2543 {
2544 struct be_adapter *adapter = netdev_priv(netdev);
2545 struct be_eq_obj *eqo;
2546 int i;
2547
2548 be_roce_dev_close(adapter);
2549
2550 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2551 for_all_evt_queues(adapter, eqo, i)
2552 napi_disable(&eqo->napi);
2553 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2554 }
2555
2556 be_async_mcc_disable(adapter);
2557
2558 /* Wait for all pending tx completions to arrive so that
2559 * all tx skbs are freed.
2560 */
2561 be_tx_compl_clean(adapter);
2562 netif_tx_disable(netdev);
2563
2564 be_rx_qs_destroy(adapter);
2565
2566 for_all_evt_queues(adapter, eqo, i) {
2567 if (msix_enabled(adapter))
2568 synchronize_irq(be_msix_vec_get(adapter, eqo));
2569 else
2570 synchronize_irq(netdev->irq);
2571 be_eq_clean(eqo);
2572 }
2573
2574 be_irq_unregister(adapter);
2575
2576 return 0;
2577 }
2578
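/* Creates the default RX queue first (as FW expects), then the RSS
 * rings, programs the 128-entry RSS indirection table with the RSS
 * ring ids, and posts the initial receive buffers.
 */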
2579 static int be_rx_qs_create(struct be_adapter *adapter)
2580 {
2581 struct be_rx_obj *rxo;
2582 int rc, i, j;
2583 u8 rsstable[128];
2584
2585 for_all_rx_queues(adapter, rxo, i) {
2586 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2587 sizeof(struct be_eth_rx_d));
2588 if (rc)
2589 return rc;
2590 }
2591
2592 /* The FW would like the default RXQ to be created first */
2593 rxo = default_rxo(adapter);
2594 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2595 adapter->if_handle, false, &rxo->rss_id);
2596 if (rc)
2597 return rc;
2598
2599 for_all_rss_queues(adapter, rxo, i) {
2600 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2601 rx_frag_size, adapter->if_handle,
2602 true, &rxo->rss_id);
2603 if (rc)
2604 return rc;
2605 }
2606
2607 if (be_multi_rxq(adapter)) {
2608 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2609 for_all_rss_queues(adapter, rxo, i) {
2610 if ((j + i) >= 128)
2611 break;
2612 rsstable[j + i] = rxo->rss_id;
2613 }
2614 }
2615 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2616 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2617
2618 if (!BEx_chip(adapter))
2619 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2620 RSS_ENABLE_UDP_IPV6;
2621
2622 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2623 128);
2624 if (rc) {
2625 adapter->rss_flags = 0;
2626 return rc;
2627 }
2628 }
2629
2630 /* First time posting */
2631 for_all_rx_queues(adapter, rxo, i)
2632 be_post_rx_frags(rxo, GFP_KERNEL);
2633 return 0;
2634 }
2635
2636 static int be_open(struct net_device *netdev)
2637 {
2638 struct be_adapter *adapter = netdev_priv(netdev);
2639 struct be_eq_obj *eqo;
2640 struct be_rx_obj *rxo;
2641 struct be_tx_obj *txo;
2642 u8 link_status;
2643 int status, i;
2644
2645 status = be_rx_qs_create(adapter);
2646 if (status)
2647 goto err;
2648
2649 status = be_irq_register(adapter);
2650 if (status)
2651 goto err;
2652
2653 for_all_rx_queues(adapter, rxo, i)
2654 be_cq_notify(adapter, rxo->cq.id, true, 0);
2655
2656 for_all_tx_queues(adapter, txo, i)
2657 be_cq_notify(adapter, txo->cq.id, true, 0);
2658
2659 be_async_mcc_enable(adapter);
2660
2661 for_all_evt_queues(adapter, eqo, i) {
2662 napi_enable(&eqo->napi);
2663 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2664 }
2665 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2666
2667 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2668 if (!status)
2669 be_link_status_update(adapter, link_status);
2670
2671 netif_tx_start_all_queues(netdev);
2672 be_roce_dev_open(adapter);
2673 return 0;
2674 err:
2675 be_close(adapter->netdev);
2676 return -EIO;
2677 }
2678
2679 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2680 {
2681 struct be_dma_mem cmd;
2682 int status = 0;
2683 u8 mac[ETH_ALEN];
2684
2685 memset(mac, 0, ETH_ALEN);
2686
2687 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2688 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2689 GFP_KERNEL | __GFP_ZERO);
2690 if (cmd.va == NULL)
2691 return -1;
2692
2693 if (enable) {
2694 status = pci_write_config_dword(adapter->pdev,
2695 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2696 if (status) {
2697 dev_err(&adapter->pdev->dev,
2698 "Could not enable Wake-on-lan\n");
2699 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2700 cmd.dma);
2701 return status;
2702 }
2703 status = be_cmd_enable_magic_wol(adapter,
2704 adapter->netdev->dev_addr, &cmd);
2705 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2706 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2707 } else {
2708 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2709 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2710 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2711 }
2712
2713 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2714 return status;
2715 }
2716
2717 /*
2718 * Generate a seed MAC address from the PF MAC Address using jhash.
2719 * MAC addresses for VFs are assigned incrementally starting from the seed.
2720 * These addresses are programmed in the ASIC by the PF and the VF driver
2721 * queries for the MAC address during its probe.
2722 */
2723 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2724 {
2725 u32 vf;
2726 int status = 0;
2727 u8 mac[ETH_ALEN];
2728 struct be_vf_cfg *vf_cfg;
2729
2730 be_vf_eth_addr_generate(adapter, mac);
2731
2732 for_all_vfs(adapter, vf_cfg, vf) {
2733 if (lancer_chip(adapter)) {
2734 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2735 } else {
2736 status = be_cmd_pmac_add(adapter, mac,
2737 vf_cfg->if_handle,
2738 &vf_cfg->pmac_id, vf + 1);
2739 }
2740
2741 if (status)
2742 dev_err(&adapter->pdev->dev,
2743 "Mac address assignment failed for VF %d\n", vf);
2744 else
2745 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2746
2747 mac[5] += 1;
2748 }
2749 return status;
2750 }
2751
2752 static int be_vfs_mac_query(struct be_adapter *adapter)
2753 {
2754 int status, vf;
2755 u8 mac[ETH_ALEN];
2756 struct be_vf_cfg *vf_cfg;
2757 bool active;
2758
2759 for_all_vfs(adapter, vf_cfg, vf) {
2760 be_cmd_get_mac_from_list(adapter, mac, &active,
2761 &vf_cfg->pmac_id, 0);
2762
2763 status = be_cmd_mac_addr_query(adapter, mac, false,
2764 vf_cfg->if_handle, 0);
2765 if (status)
2766 return status;
2767 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2768 }
2769 return 0;
2770 }
2771
2772 static void be_vf_clear(struct be_adapter *adapter)
2773 {
2774 struct be_vf_cfg *vf_cfg;
2775 u32 vf;
2776
2777 if (be_find_vfs(adapter, ASSIGNED)) {
2778 dev_warn(&adapter->pdev->dev,
2779 "VFs are assigned to VMs: not disabling VFs\n");
2780 goto done;
2781 }
2782
2783 pci_disable_sriov(adapter->pdev);
2784
2785 for_all_vfs(adapter, vf_cfg, vf) {
2786 if (lancer_chip(adapter))
2787 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2788 else
2789 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2790 vf_cfg->pmac_id, vf + 1);
2791
2792 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2793 }
2794 done:
2795 kfree(adapter->vf_cfg);
2796 adapter->num_vfs = 0;
2797 }
2798
2799 static int be_clear(struct be_adapter *adapter)
2800 {
2801 int i = 1;
2802
2803 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2804 cancel_delayed_work_sync(&adapter->work);
2805 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2806 }
2807
2808 if (sriov_enabled(adapter))
2809 be_vf_clear(adapter);
2810
2811 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2812 be_cmd_pmac_del(adapter, adapter->if_handle,
2813 adapter->pmac_id[i], 0);
2814
2815 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2816
2817 be_mcc_queues_destroy(adapter);
2818 be_rx_cqs_destroy(adapter);
2819 be_tx_queues_destroy(adapter);
2820 be_evt_queues_destroy(adapter);
2821
2822 kfree(adapter->pmac_id);
2823 adapter->pmac_id = NULL;
2824
2825 be_msix_disable(adapter);
2826 return 0;
2827 }
2828
2829 static int be_vfs_if_create(struct be_adapter *adapter)
2830 {
2831 struct be_vf_cfg *vf_cfg;
2832 u32 cap_flags, en_flags, vf;
2833 int status;
2834
2835 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2836 BE_IF_FLAGS_MULTICAST;
2837
2838 for_all_vfs(adapter, vf_cfg, vf) {
2839 if (!BE3_chip(adapter))
2840 be_cmd_get_profile_config(adapter, &cap_flags,
2841 NULL, vf + 1);
2842
2843 /* If a FW profile exists, then cap_flags are updated */
2844 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2845 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2846 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2847 &vf_cfg->if_handle, vf + 1);
2848 if (status)
2849 goto err;
2850 }
2851 err:
2852 return status;
2853 }
2854
2855 static int be_vf_setup_init(struct be_adapter *adapter)
2856 {
2857 struct be_vf_cfg *vf_cfg;
2858 int vf;
2859
2860 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2861 GFP_KERNEL);
2862 if (!adapter->vf_cfg)
2863 return -ENOMEM;
2864
2865 for_all_vfs(adapter, vf_cfg, vf) {
2866 vf_cfg->if_handle = -1;
2867 vf_cfg->pmac_id = -1;
2868 }
2869 return 0;
2870 }
2871
2872 static int be_vf_setup(struct be_adapter *adapter)
2873 {
2874 struct be_vf_cfg *vf_cfg;
2875 u16 def_vlan, lnk_speed;
2876 int status, old_vfs, vf;
2877 struct device *dev = &adapter->pdev->dev;
2878
2879 old_vfs = be_find_vfs(adapter, ENABLED);
2880 if (old_vfs) {
2881 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2882 if (old_vfs != num_vfs)
2883 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2884 adapter->num_vfs = old_vfs;
2885 } else {
2886 if (num_vfs > adapter->dev_num_vfs)
2887 dev_info(dev, "Device supports %d VFs and not %d\n",
2888 adapter->dev_num_vfs, num_vfs);
2889 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2890 if (!adapter->num_vfs)
2891 return 0;
2892 }
2893
2894 status = be_vf_setup_init(adapter);
2895 if (status)
2896 goto err;
2897
2898 if (old_vfs) {
2899 for_all_vfs(adapter, vf_cfg, vf) {
2900 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2901 if (status)
2902 goto err;
2903 }
2904 } else {
2905 status = be_vfs_if_create(adapter);
2906 if (status)
2907 goto err;
2908 }
2909
2910 if (old_vfs) {
2911 status = be_vfs_mac_query(adapter);
2912 if (status)
2913 goto err;
2914 } else {
2915 status = be_vf_eth_addr_config(adapter);
2916 if (status)
2917 goto err;
2918 }
2919
2920 for_all_vfs(adapter, vf_cfg, vf) {
2921 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2922 * Allow full available bandwidth
2923 */
2924 if (BE3_chip(adapter) && !old_vfs)
2925 be_cmd_set_qos(adapter, 1000, vf+1);
2926
2927 status = be_cmd_link_status_query(adapter, &lnk_speed,
2928 NULL, vf + 1);
2929 if (!status)
2930 vf_cfg->tx_rate = lnk_speed;
2931
2932 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2933 vf + 1, vf_cfg->if_handle);
2934 if (status)
2935 goto err;
2936 vf_cfg->def_vid = def_vlan;
2937
2938 be_cmd_enable_vf(adapter, vf + 1);
2939 }
2940
2941 if (!old_vfs) {
2942 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2943 if (status) {
2944 dev_err(dev, "SRIOV enable failed\n");
2945 adapter->num_vfs = 0;
2946 goto err;
2947 }
2948 }
2949 return 0;
2950 err:
2951 dev_err(dev, "VF setup failed\n");
2952 be_vf_clear(adapter);
2953 return status;
2954 }
2955
2956 static void be_setup_init(struct be_adapter *adapter)
2957 {
2958 adapter->vlan_prio_bmap = 0xff;
2959 adapter->phy.link_speed = -1;
2960 adapter->if_handle = -1;
2961 adapter->be3_native = false;
2962 adapter->promiscuous = false;
2963 if (be_physfn(adapter))
2964 adapter->cmd_privileges = MAX_PRIVILEGES;
2965 else
2966 adapter->cmd_privileges = MIN_PRIVILEGES;
2967 }
2968
2969 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2970 bool *active_mac, u32 *pmac_id)
2971 {
2972 int status = 0;
2973
2974 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2975 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2976 if (!lancer_chip(adapter) && !be_physfn(adapter))
2977 *active_mac = true;
2978 else
2979 *active_mac = false;
2980
2981 return status;
2982 }
2983
2984 if (lancer_chip(adapter)) {
2985 status = be_cmd_get_mac_from_list(adapter, mac,
2986 active_mac, pmac_id, 0);
2987 if (*active_mac) {
2988 status = be_cmd_mac_addr_query(adapter, mac, false,
2989 if_handle, *pmac_id);
2990 }
2991 } else if (be_physfn(adapter)) {
2992 /* For BE3, for PF get permanent MAC */
2993 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2994 *active_mac = false;
2995 } else {
2996 /* For BE3, for VF get soft MAC assigned by PF */
2997 status = be_cmd_mac_addr_query(adapter, mac, false,
2998 if_handle, 0);
2999 *active_mac = true;
3000 }
3001 return status;
3002 }
3003
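/* Discovers per-function resource limits: from the FW function profile
 * on Lancer/Skyhawk, or from BE2/BE3 defaults otherwise; also reads the
 * SR-IOV TotalVFs capability from PCI config space.
 */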
3004 static void be_get_resources(struct be_adapter *adapter)
3005 {
3006 u16 dev_num_vfs;
3007 int pos, status;
3008 bool profile_present = false;
3009 u16 txq_count = 0;
3010
3011 if (!BEx_chip(adapter)) {
3012 status = be_cmd_get_func_config(adapter);
3013 if (!status)
3014 profile_present = true;
3015 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3016 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3017 }
3018
3019 if (profile_present) {
3020 /* Sanity fixes for Lancer */
3021 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3022 BE_UC_PMAC_COUNT);
3023 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3024 BE_NUM_VLANS_SUPPORTED);
3025 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3026 BE_MAX_MC);
3027 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3028 MAX_TX_QS);
3029 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3030 BE3_MAX_RSS_QS);
3031 adapter->max_event_queues = min_t(u16,
3032 adapter->max_event_queues,
3033 BE3_MAX_RSS_QS);
3034
3035 if (adapter->max_rss_queues &&
3036 adapter->max_rss_queues == adapter->max_rx_queues)
3037 adapter->max_rss_queues -= 1;
3038
3039 if (adapter->max_event_queues < adapter->max_rss_queues)
3040 adapter->max_rss_queues = adapter->max_event_queues;
3041
3042 } else {
3043 if (be_physfn(adapter))
3044 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3045 else
3046 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3047
3048 if (adapter->function_mode & FLEX10_MODE)
3049 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3050 else
3051 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3052
3053 adapter->max_mcast_mac = BE_MAX_MC;
3054 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3055 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3056 MAX_TX_QS);
3057 adapter->max_rss_queues = (adapter->be3_native) ?
3058 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3059 adapter->max_event_queues = BE3_MAX_RSS_QS;
3060
3061 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3062 BE_IF_FLAGS_BROADCAST |
3063 BE_IF_FLAGS_MULTICAST |
3064 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3065 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3066 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3067 BE_IF_FLAGS_PROMISCUOUS;
3068
3069 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3070 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3071 }
3072
3073 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3074 if (pos) {
3075 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3076 &dev_num_vfs);
3077 if (BE3_chip(adapter))
3078 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3079 adapter->dev_num_vfs = dev_num_vfs;
3080 }
3081 }
3082
3083 /* Routine to query per function resource limits */
3084 static int be_get_config(struct be_adapter *adapter)
3085 {
3086 int status;
3087
3088 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3089 &adapter->function_mode,
3090 &adapter->function_caps,
3091 &adapter->asic_rev);
3092 if (status)
3093 goto err;
3094
3095 be_get_resources(adapter);
3096
3097 /* primary mac needs 1 pmac entry */
3098 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3099 sizeof(u32), GFP_KERNEL);
3100 if (!adapter->pmac_id) {
3101 status = -ENOMEM;
3102 goto err;
3103 }
3104
3105 err:
3106 return status;
3107 }
3108
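/* Top-level adapter setup: queries FW config and resource limits,
 * enables MSI-X, creates EQs/CQs/MCC/TX/RX queues, creates the
 * interface and programs the primary MAC, then starts VF setup (on PFs)
 * and the periodic worker.
 */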
3109 static int be_setup(struct be_adapter *adapter)
3110 {
3111 struct device *dev = &adapter->pdev->dev;
3112 u32 en_flags;
3113 u32 tx_fc, rx_fc;
3114 int status;
3115 u8 mac[ETH_ALEN];
3116 bool active_mac;
3117
3118 be_setup_init(adapter);
3119
3120 if (!lancer_chip(adapter))
3121 be_cmd_req_native_mode(adapter);
3122
3123 status = be_get_config(adapter);
3124 if (status)
3125 goto err;
3126
3127 status = be_msix_enable(adapter);
3128 if (status)
3129 goto err;
3130
3131 status = be_evt_queues_create(adapter);
3132 if (status)
3133 goto err;
3134
3135 status = be_tx_cqs_create(adapter);
3136 if (status)
3137 goto err;
3138
3139 status = be_rx_cqs_create(adapter);
3140 if (status)
3141 goto err;
3142
3143 status = be_mcc_queues_create(adapter);
3144 if (status)
3145 goto err;
3146
3147 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3148 /* In UMC mode FW does not return right privileges.
3149 * Override with correct privilege equivalent to PF.
3150 */
3151 if (be_is_mc(adapter))
3152 adapter->cmd_privileges = MAX_PRIVILEGES;
3153
3154 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3155 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3156
3157 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3158 en_flags |= BE_IF_FLAGS_RSS;
3159
3160 en_flags = en_flags & adapter->if_cap_flags;
3161
3162 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3163 &adapter->if_handle, 0);
3164 if (status != 0)
3165 goto err;
3166
3167 memset(mac, 0, ETH_ALEN);
3168 active_mac = false;
3169 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3170 &active_mac, &adapter->pmac_id[0]);
3171 if (status != 0)
3172 goto err;
3173
3174 if (!active_mac) {
3175 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3176 &adapter->pmac_id[0], 0);
3177 if (status != 0)
3178 goto err;
3179 }
3180
3181 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3182 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3183 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3184 }
3185
3186 status = be_tx_qs_create(adapter);
3187 if (status)
3188 goto err;
3189
3190 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3191
3192 if (adapter->vlans_added)
3193 be_vid_config(adapter);
3194
3195 be_set_rx_mode(adapter->netdev);
3196
3197 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3198
3199 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3200 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3201 adapter->rx_fc);
3202
3203 if (be_physfn(adapter)) {
3204 if (adapter->dev_num_vfs)
3205 be_vf_setup(adapter);
3206 else
3207 dev_warn(dev, "device doesn't support SRIOV\n");
3208 }
3209
3210 status = be_cmd_get_phy_info(adapter);
3211 if (!status && be_pause_supported(adapter))
3212 adapter->phy.fc_autoneg = 1;
3213
3214 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3215 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3216 return 0;
3217 err:
3218 be_clear(adapter);
3219 return status;
3220 }
3221
3222 #ifdef CONFIG_NET_POLL_CONTROLLER
3223 static void be_netpoll(struct net_device *netdev)
3224 {
3225 struct be_adapter *adapter = netdev_priv(netdev);
3226 struct be_eq_obj *eqo;
3227 int i;
3228
3229 for_all_evt_queues(adapter, eqo, i) {
3230 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3231 napi_schedule(&eqo->napi);
3232 }
3233
3234 return;
3235 }
3236 #endif
3237
3238 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3239 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3240
3241 static bool be_flash_redboot(struct be_adapter *adapter,
3242 const u8 *p, u32 img_start, int image_size,
3243 int hdr_size)
3244 {
3245 u32 crc_offset;
3246 u8 flashed_crc[4];
3247 int status;
3248
3249 crc_offset = hdr_size + img_start + image_size - 4;
3250
3251 p += crc_offset;
3252
3253 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3254 (image_size - 4));
3255 if (status) {
3256 dev_err(&adapter->pdev->dev,
3257 "could not get crc from flash, not flashing redboot\n");
3258 return false;
3259 }
3260
3261 /* update redboot only if crc does not match */
3262 if (!memcmp(flashed_crc, p, 4))
3263 return false;
3264 else
3265 return true;
3266 }
3267
3268 static bool phy_flashing_required(struct be_adapter *adapter)
3269 {
3270 return (adapter->phy.phy_type == TN_8022 &&
3271 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3272 }
3273
3274 static bool is_comp_in_ufi(struct be_adapter *adapter,
3275 struct flash_section_info *fsec, int type)
3276 {
3277 int i = 0, img_type = 0;
3278 struct flash_section_info_g2 *fsec_g2 = NULL;
3279
3280 if (BE2_chip(adapter))
3281 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3282
3283 for (i = 0; i < MAX_FLASH_COMP; i++) {
3284 if (fsec_g2)
3285 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3286 else
3287 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3288
3289 if (img_type == type)
3290 return true;
3291 }
3292 return false;
3293
3294 }
3295
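/* Scans the UFI image past the given header for the flash-section
 * cookie and returns the flash_section_info it marks, or NULL if no
 * cookie is found.
 */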
3296 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3297 int header_size,
3298 const struct firmware *fw)
3299 {
3300 struct flash_section_info *fsec = NULL;
3301 const u8 *p = fw->data;
3302
3303 p += header_size;
3304 while (p < (fw->data + fw->size)) {
3305 fsec = (struct flash_section_info *)p;
3306 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3307 return fsec;
3308 p += 32;
3309 }
3310 return NULL;
3311 }
3312
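/* Writes an image to the flash ROM in 32KB chunks: intermediate chunks
 * use a SAVE operation and the final chunk uses a FLASH operation to
 * commit. A PHY FW write rejected with ILLEGAL_IOCTL_REQ is treated as
 * non-fatal.
 */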
3313 static int be_flash(struct be_adapter *adapter, const u8 *img,
3314 struct be_dma_mem *flash_cmd, int optype, int img_size)
3315 {
3316 u32 total_bytes = 0, flash_op, num_bytes = 0;
3317 int status = 0;
3318 struct be_cmd_write_flashrom *req = flash_cmd->va;
3319
3320 total_bytes = img_size;
3321 while (total_bytes) {
3322 num_bytes = min_t(u32, 32*1024, total_bytes);
3323
3324 total_bytes -= num_bytes;
3325
3326 if (!total_bytes) {
3327 if (optype == OPTYPE_PHY_FW)
3328 flash_op = FLASHROM_OPER_PHY_FLASH;
3329 else
3330 flash_op = FLASHROM_OPER_FLASH;
3331 } else {
3332 if (optype == OPTYPE_PHY_FW)
3333 flash_op = FLASHROM_OPER_PHY_SAVE;
3334 else
3335 flash_op = FLASHROM_OPER_SAVE;
3336 }
3337
3338 memcpy(req->data_buf, img, num_bytes);
3339 img += num_bytes;
3340 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3341 flash_op, num_bytes);
3342 if (status) {
3343 if (status == ILLEGAL_IOCTL_REQ &&
3344 optype == OPTYPE_PHY_FW)
3345 break;
3346 dev_err(&adapter->pdev->dev,
3347 "cmd to write to flash rom failed.\n");
3348 return status;
3349 }
3350 }
3351 return 0;
3352 }
3353
3354 /* For BE2, BE3 and BE3-R */
3355 static int be_flash_BEx(struct be_adapter *adapter,
3356 const struct firmware *fw,
3357 struct be_dma_mem *flash_cmd,
3358 int num_of_images)
3359
3360 {
3361 int status = 0, i, filehdr_size = 0;
3362 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3363 const u8 *p = fw->data;
3364 const struct flash_comp *pflashcomp;
3365 int num_comp, redboot;
3366 struct flash_section_info *fsec = NULL;
3367
3368 struct flash_comp gen3_flash_types[] = {
3369 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3370 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3371 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3372 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3373 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3374 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3375 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3376 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3377 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3378 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3379 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3380 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3381 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3382 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3383 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3384 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3385 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3386 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3387 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3388 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3389 };
3390
3391 struct flash_comp gen2_flash_types[] = {
3392 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3393 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3394 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3395 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3396 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3397 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3398 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3399 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3400 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3401 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3402 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3403 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3404 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3405 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3406 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3407 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3408 };
3409
3410 if (BE3_chip(adapter)) {
3411 pflashcomp = gen3_flash_types;
3412 filehdr_size = sizeof(struct flash_file_hdr_g3);
3413 num_comp = ARRAY_SIZE(gen3_flash_types);
3414 } else {
3415 pflashcomp = gen2_flash_types;
3416 filehdr_size = sizeof(struct flash_file_hdr_g2);
3417 num_comp = ARRAY_SIZE(gen2_flash_types);
3418 }
3419
3420 /* Get flash section info */
3421 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3422 if (!fsec) {
3423 dev_err(&adapter->pdev->dev,
3424 "Invalid Cookie. UFI corrupted ?\n");
3425 return -1;
3426 }
3427 for (i = 0; i < num_comp; i++) {
3428 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3429 continue;
3430
3431 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3432 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3433 continue;
3434
3435 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3436 !phy_flashing_required(adapter))
3437 continue;
3438
3439 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3440 redboot = be_flash_redboot(adapter, fw->data,
3441 pflashcomp[i].offset, pflashcomp[i].size,
3442 filehdr_size + img_hdrs_size);
3443 if (!redboot)
3444 continue;
3445 }
3446
3447 p = fw->data;
3448 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3449 if (p + pflashcomp[i].size > fw->data + fw->size)
3450 return -1;
3451
3452 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3453 pflashcomp[i].size);
3454 if (status) {
3455 dev_err(&adapter->pdev->dev,
3456 "Flashing section type %d failed.\n",
3457 pflashcomp[i].img_type);
3458 return status;
3459 }
3460 }
3461 return 0;
3462 }
3463
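/* Flashes a Skyhawk UFI: walks the flash-section entries, maps each
 * image type to its flash optype, skips boot code whose CRC already
 * matches the flashed image, and writes each remaining image.
 */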
3464 static int be_flash_skyhawk(struct be_adapter *adapter,
3465 const struct firmware *fw,
3466 struct be_dma_mem *flash_cmd, int num_of_images)
3467 {
3468 int status = 0, i, filehdr_size = 0;
3469 int img_offset, img_size, img_optype, redboot;
3470 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3471 const u8 *p = fw->data;
3472 struct flash_section_info *fsec = NULL;
3473
3474 filehdr_size = sizeof(struct flash_file_hdr_g3);
3475 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3476 if (!fsec) {
3477 dev_err(&adapter->pdev->dev,
3478 "Invalid Cookie. UFI corrupted ?\n");
3479 return -1;
3480 }
3481
3482 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3483 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3484 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3485
3486 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3487 case IMAGE_FIRMWARE_iSCSI:
3488 img_optype = OPTYPE_ISCSI_ACTIVE;
3489 break;
3490 case IMAGE_BOOT_CODE:
3491 img_optype = OPTYPE_REDBOOT;
3492 break;
3493 case IMAGE_OPTION_ROM_ISCSI:
3494 img_optype = OPTYPE_BIOS;
3495 break;
3496 case IMAGE_OPTION_ROM_PXE:
3497 img_optype = OPTYPE_PXE_BIOS;
3498 break;
3499 case IMAGE_OPTION_ROM_FCoE:
3500 img_optype = OPTYPE_FCOE_BIOS;
3501 break;
3502 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3503 img_optype = OPTYPE_ISCSI_BACKUP;
3504 break;
3505 case IMAGE_NCSI:
3506 img_optype = OPTYPE_NCSI_FW;
3507 break;
3508 default:
3509 continue;
3510 }
3511
3512 if (img_optype == OPTYPE_REDBOOT) {
3513 redboot = be_flash_redboot(adapter, fw->data,
3514 img_offset, img_size,
3515 filehdr_size + img_hdrs_size);
3516 if (!redboot)
3517 continue;
3518 }
3519
3520 p = fw->data;
3521 p += filehdr_size + img_offset + img_hdrs_size;
3522 if (p + img_size > fw->data + fw->size)
3523 return -1;
3524
3525 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3526 if (status) {
3527 dev_err(&adapter->pdev->dev,
3528 "Flashing section type %d failed.\n",
3529 fsec->fsec_entry[i].type);
3530 return status;
3531 }
3532 }
3533 return 0;
3534 }
3535
3536 static int lancer_wait_idle(struct be_adapter *adapter)
3537 {
3538 #define SLIPORT_IDLE_TIMEOUT 30
3539 u32 reg_val;
3540 int status = 0, i;
3541
3542 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3543 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3544 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3545 break;
3546
3547 ssleep(1);
3548 }
3549
3550 if (i == SLIPORT_IDLE_TIMEOUT)
3551 status = -1;
3552
3553 return status;
3554 }
3555
3556 static int lancer_fw_reset(struct be_adapter *adapter)
3557 {
3558 int status = 0;
3559
3560 status = lancer_wait_idle(adapter);
3561 if (status)
3562 return status;
3563
3564 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3565 PHYSDEV_CONTROL_OFFSET);
3566
3567 return status;
3568 }
3569
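/* Streams the FW image to a Lancer adapter in 32KB chunks via
 * write-object commands to "/prg", issues a zero-length write to commit
 * it, and resets the chip if the FW indicates a reset is needed.
 */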
3570 static int lancer_fw_download(struct be_adapter *adapter,
3571 const struct firmware *fw)
3572 {
3573 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3574 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3575 struct be_dma_mem flash_cmd;
3576 const u8 *data_ptr = NULL;
3577 u8 *dest_image_ptr = NULL;
3578 size_t image_size = 0;
3579 u32 chunk_size = 0;
3580 u32 data_written = 0;
3581 u32 offset = 0;
3582 int status = 0;
3583 u8 add_status = 0;
3584 u8 change_status;
3585
3586 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3587 dev_err(&adapter->pdev->dev,
3588 "FW Image not properly aligned. "
3589 "Length must be 4 byte aligned.\n");
3590 status = -EINVAL;
3591 goto lancer_fw_exit;
3592 }
3593
3594 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3595 + LANCER_FW_DOWNLOAD_CHUNK;
3596 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3597 &flash_cmd.dma, GFP_KERNEL);
3598 if (!flash_cmd.va) {
3599 status = -ENOMEM;
3600 goto lancer_fw_exit;
3601 }
3602
3603 dest_image_ptr = flash_cmd.va +
3604 sizeof(struct lancer_cmd_req_write_object);
3605 image_size = fw->size;
3606 data_ptr = fw->data;
3607
3608 while (image_size) {
3609 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3610
3611 /* Copy the image chunk content. */
3612 memcpy(dest_image_ptr, data_ptr, chunk_size);
3613
3614 status = lancer_cmd_write_object(adapter, &flash_cmd,
3615 chunk_size, offset,
3616 LANCER_FW_DOWNLOAD_LOCATION,
3617 &data_written, &change_status,
3618 &add_status);
3619 if (status)
3620 break;
3621
3622 offset += data_written;
3623 data_ptr += data_written;
3624 image_size -= data_written;
3625 }
3626
3627 if (!status) {
3628 /* Commit the FW written */
3629 status = lancer_cmd_write_object(adapter, &flash_cmd,
3630 0, offset,
3631 LANCER_FW_DOWNLOAD_LOCATION,
3632 &data_written, &change_status,
3633 &add_status);
3634 }
3635
3636 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3637 flash_cmd.dma);
3638 if (status) {
3639 dev_err(&adapter->pdev->dev,
3640 "Firmware load error. "
3641 "Status code: 0x%x Additional Status: 0x%x\n",
3642 status, add_status);
3643 goto lancer_fw_exit;
3644 }
3645
3646 if (change_status == LANCER_FW_RESET_NEEDED) {
3647 status = lancer_fw_reset(adapter);
3648 if (status) {
3649 dev_err(&adapter->pdev->dev,
3650 "Adapter busy for FW reset.\n"
3651 "New FW will not be active.\n");
3652 goto lancer_fw_exit;
3653 }
3654 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3655 dev_err(&adapter->pdev->dev,
3656 "System reboot required for new FW"
3657 " to be active\n");
3658 }
3659
3660 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3661 lancer_fw_exit:
3662 return status;
3663 }
3664
3665 #define UFI_TYPE2 2
3666 #define UFI_TYPE3 3
3667 #define UFI_TYPE3R 10
3668 #define UFI_TYPE4 4
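/* Maps the UFI file header to a UFI type using the adapter family and
 * the header's build string / ASIC revision; returns -1 when the image
 * is not compatible with this adapter.
 */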
3669 static int be_get_ufi_type(struct be_adapter *adapter,
3670 struct flash_file_hdr_g3 *fhdr)
3671 {
3672 if (fhdr == NULL)
3673 goto be_get_ufi_exit;
3674
3675 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3676 return UFI_TYPE4;
3677 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3678 if (fhdr->asic_type_rev == 0x10)
3679 return UFI_TYPE3R;
3680 else
3681 return UFI_TYPE3;
3682 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3683 return UFI_TYPE2;
3684
3685 be_get_ufi_exit:
3686 dev_err(&adapter->pdev->dev,
3687 "UFI and Interface are not compatible for flashing\n");
3688 return -1;
3689 }
3690
3691 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3692 {
3693 struct flash_file_hdr_g3 *fhdr3;
3694 struct image_hdr *img_hdr_ptr = NULL;
3695 struct be_dma_mem flash_cmd;
3696 const u8 *p;
3697 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3698
3699 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3700 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3701 &flash_cmd.dma, GFP_KERNEL);
3702 if (!flash_cmd.va) {
3703 status = -ENOMEM;
3704 goto be_fw_exit;
3705 }
3706
3707 p = fw->data;
3708 fhdr3 = (struct flash_file_hdr_g3 *)p;
3709
3710 ufi_type = be_get_ufi_type(adapter, fhdr3);
3711
3712 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3713 for (i = 0; i < num_imgs; i++) {
3714 img_hdr_ptr = (struct image_hdr *)(fw->data +
3715 (sizeof(struct flash_file_hdr_g3) +
3716 i * sizeof(struct image_hdr)));
3717 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3718 switch (ufi_type) {
3719 case UFI_TYPE4:
3720 status = be_flash_skyhawk(adapter, fw,
3721 &flash_cmd, num_imgs);
3722 break;
3723 case UFI_TYPE3R:
3724 status = be_flash_BEx(adapter, fw, &flash_cmd,
3725 num_imgs);
3726 break;
3727 case UFI_TYPE3:
3728 /* Do not flash this ufi on BE3-R cards */
3729 if (adapter->asic_rev < 0x10)
3730 status = be_flash_BEx(adapter, fw,
3731 &flash_cmd,
3732 num_imgs);
3733 else {
3734 status = -1;
3735 dev_err(&adapter->pdev->dev,
3736 "Can't load BE3 UFI on BE3R\n");
3737 }
3738 }
3739 }
3740 }
3741
3742 if (ufi_type == UFI_TYPE2)
3743 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3744 else if (ufi_type == -1)
3745 status = -1;
3746
3747 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3748 flash_cmd.dma);
3749 if (status) {
3750 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3751 goto be_fw_exit;
3752 }
3753
3754 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3755
3756 be_fw_exit:
3757 return status;
3758 }
3759
3760 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3761 {
3762 const struct firmware *fw;
3763 int status;
3764
3765 if (!netif_running(adapter->netdev)) {
3766 dev_err(&adapter->pdev->dev,
3767 "Firmware load not allowed (interface is down)\n");
3768 return -1;
3769 }
3770
3771 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3772 if (status)
3773 goto fw_exit;
3774
3775 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3776
3777 if (lancer_chip(adapter))
3778 status = lancer_fw_download(adapter, fw);
3779 else
3780 status = be_fw_download(adapter, fw);
3781
3782 fw_exit:
3783 release_firmware(fw);
3784 return status;
3785 }
3786
3787 static const struct net_device_ops be_netdev_ops = {
3788 .ndo_open = be_open,
3789 .ndo_stop = be_close,
3790 .ndo_start_xmit = be_xmit,
3791 .ndo_set_rx_mode = be_set_rx_mode,
3792 .ndo_set_mac_address = be_mac_addr_set,
3793 .ndo_change_mtu = be_change_mtu,
3794 .ndo_get_stats64 = be_get_stats64,
3795 .ndo_validate_addr = eth_validate_addr,
3796 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3797 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3798 .ndo_set_vf_mac = be_set_vf_mac,
3799 .ndo_set_vf_vlan = be_set_vf_vlan,
3800 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3801 .ndo_get_vf_config = be_get_vf_config,
3802 #ifdef CONFIG_NET_POLL_CONTROLLER
3803 .ndo_poll_controller = be_netpoll,
3804 #endif
3805 };
3806
3807 static void be_netdev_init(struct net_device *netdev)
3808 {
3809 struct be_adapter *adapter = netdev_priv(netdev);
3810 struct be_eq_obj *eqo;
3811 int i;
3812
3813 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3814 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3815 NETIF_F_HW_VLAN_CTAG_TX;
3816 if (be_multi_rxq(adapter))
3817 netdev->hw_features |= NETIF_F_RXHASH;
3818
3819 netdev->features |= netdev->hw_features |
3820 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3821
3822 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3823 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3824
3825 netdev->priv_flags |= IFF_UNICAST_FLT;
3826
3827 netdev->flags |= IFF_MULTICAST;
3828
3829 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3830
3831 netdev->netdev_ops = &be_netdev_ops;
3832
3833 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3834
3835 for_all_evt_queues(adapter, eqo, i)
3836 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3837 }
3838
3839 static void be_unmap_pci_bars(struct be_adapter *adapter)
3840 {
3841 if (adapter->csr)
3842 pci_iounmap(adapter->pdev, adapter->csr);
3843 if (adapter->db)
3844 pci_iounmap(adapter->pdev, adapter->db);
3845 }
3846
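/* Doorbell registers live in BAR 0 on Lancer chips and on VFs; BE/Skyhawk
 * physical functions expose them in BAR 4.
 */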
3847 static int db_bar(struct be_adapter *adapter)
3848 {
3849 if (lancer_chip(adapter) || !be_physfn(adapter))
3850 return 0;
3851 else
3852 return 4;
3853 }
3854
3855 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3856 {
3857 if (skyhawk_chip(adapter)) {
3858 adapter->roce_db.size = 4096;
3859 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3860 db_bar(adapter));
3861 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3862 db_bar(adapter));
3863 }
3864 return 0;
3865 }
3866
3867 static int be_map_pci_bars(struct be_adapter *adapter)
3868 {
3869 u8 __iomem *addr;
3870 u32 sli_intf;
3871
3872 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3873 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3874 SLI_INTF_IF_TYPE_SHIFT;
3875
3876 if (BEx_chip(adapter) && be_physfn(adapter)) {
3877 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3878 if (adapter->csr == NULL)
3879 return -ENOMEM;
3880 }
3881
3882 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3883 if (addr == NULL)
3884 goto pci_map_err;
3885 adapter->db = addr;
3886
3887 be_roce_map_pci_bars(adapter);
3888 return 0;
3889
3890 pci_map_err:
3891 be_unmap_pci_bars(adapter);
3892 return -ENOMEM;
3893 }
3894
3895 static void be_ctrl_cleanup(struct be_adapter *adapter)
3896 {
3897 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3898
3899 be_unmap_pci_bars(adapter);
3900
3901 if (mem->va)
3902 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3903 mem->dma);
3904
3905 mem = &adapter->rx_filter;
3906 if (mem->va)
3907 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3908 mem->dma);
3909 }
3910
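/* Map the PCI BARs and allocate the DMA buffers used for mailbox commands
 * (a 16-byte-aligned view inside mbox_mem_alloced) and the RX filter
 * command, then initialize the mailbox and MCC locks.
 */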
3911 static int be_ctrl_init(struct be_adapter *adapter)
3912 {
3913 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3914 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3915 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3916 u32 sli_intf;
3917 int status;
3918
3919 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3920 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3921 SLI_INTF_FAMILY_SHIFT;
3922 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3923
3924 status = be_map_pci_bars(adapter);
3925 if (status)
3926 goto done;
3927
3928 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3929 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3930 mbox_mem_alloc->size,
3931 &mbox_mem_alloc->dma,
3932 GFP_KERNEL);
3933 if (!mbox_mem_alloc->va) {
3934 status = -ENOMEM;
3935 goto unmap_pci_bars;
3936 }
3937 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3938 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3939 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3940 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3941
3942 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3943 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3944 &rx_filter->dma,
3945 GFP_KERNEL | __GFP_ZERO);
3946 if (rx_filter->va == NULL) {
3947 status = -ENOMEM;
3948 goto free_mbox;
3949 }
3950
3951 mutex_init(&adapter->mbox_lock);
3952 spin_lock_init(&adapter->mcc_lock);
3953 spin_lock_init(&adapter->mcc_cq_lock);
3954
3955 init_completion(&adapter->flash_compl);
3956 pci_save_state(adapter->pdev);
3957 return 0;
3958
3959 free_mbox:
3960 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3961 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3962
3963 unmap_pci_bars:
3964 be_unmap_pci_bars(adapter);
3965
3966 done:
3967 return status;
3968 }
3969
3970 static void be_stats_cleanup(struct be_adapter *adapter)
3971 {
3972 struct be_dma_mem *cmd = &adapter->stats_cmd;
3973
3974 if (cmd->va)
3975 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3976 cmd->va, cmd->dma);
3977 }
3978
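/* Allocate a DMA buffer for the periodic stats command; its size depends on
 * whether the chip uses the Lancer pport, v0 (BE2) or v1 (BE3/Skyhawk)
 * stats request format.
 */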
3979 static int be_stats_init(struct be_adapter *adapter)
3980 {
3981 struct be_dma_mem *cmd = &adapter->stats_cmd;
3982
3983 if (lancer_chip(adapter))
3984 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3985 else if (BE2_chip(adapter))
3986 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3987 else
3988 /* BE3 and Skyhawk */
3989 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3990
3991 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3992 GFP_KERNEL | __GFP_ZERO);
3993 if (cmd->va == NULL)
3994 return -1;
3995 return 0;
3996 }
3997
3998 static void be_remove(struct pci_dev *pdev)
3999 {
4000 struct be_adapter *adapter = pci_get_drvdata(pdev);
4001
4002 if (!adapter)
4003 return;
4004
4005 be_roce_dev_remove(adapter);
4006 be_intr_set(adapter, false);
4007
4008 cancel_delayed_work_sync(&adapter->func_recovery_work);
4009
4010 unregister_netdev(adapter->netdev);
4011
4012 be_clear(adapter);
4013
4014 /* tell fw we're done with firing cmds */
4015 be_cmd_fw_clean(adapter);
4016
4017 be_stats_cleanup(adapter);
4018
4019 be_ctrl_cleanup(adapter);
4020
4021 pci_disable_pcie_error_reporting(pdev);
4022
4023 pci_set_drvdata(pdev, NULL);
4024 pci_release_regions(pdev);
4025 pci_disable_device(pdev);
4026
4027 free_netdev(adapter->netdev);
4028 }
4029
4030 bool be_is_wol_supported(struct be_adapter *adapter)
4031 {
4032 	return (adapter->wol_cap & BE_WOL_CAP) &&
4033 		!be_is_wol_excluded(adapter);
4034 }
4035
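/* Query the firmware's extended FAT configuration and return the UART trace
 * level; returns 0 on Lancer or on any failure.
 */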
4036 u32 be_get_fw_log_level(struct be_adapter *adapter)
4037 {
4038 struct be_dma_mem extfat_cmd;
4039 struct be_fat_conf_params *cfgs;
4040 int status;
4041 u32 level = 0;
4042 int j;
4043
4044 if (lancer_chip(adapter))
4045 return 0;
4046
4047 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4048 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4049 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4050 &extfat_cmd.dma);
4051
4052 if (!extfat_cmd.va) {
4053 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4054 __func__);
4055 goto err;
4056 }
4057
4058 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4059 if (!status) {
4060 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4061 sizeof(struct be_cmd_resp_hdr));
4062 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4063 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4064 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4065 }
4066 }
4067 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4068 extfat_cmd.dma);
4069 err:
4070 return level;
4071 }
4072
4073 static int be_get_initial_config(struct be_adapter *adapter)
4074 {
4075 int status;
4076 u32 level;
4077
4078 status = be_cmd_get_cntl_attributes(adapter);
4079 if (status)
4080 return status;
4081
4082 status = be_cmd_get_acpi_wol_cap(adapter);
4083 if (status) {
4084 		/* in case of a failure to get WOL capabilities,
4085 		 * check the exclusion list to determine WOL capability */
4086 if (!be_is_wol_excluded(adapter))
4087 adapter->wol_cap |= BE_WOL_CAP;
4088 }
4089
4090 if (be_is_wol_supported(adapter))
4091 adapter->wol = true;
4092
4093 /* Must be a power of 2 or else MODULO will BUG_ON */
4094 adapter->be_get_temp_freq = 64;
4095
4096 level = be_get_fw_log_level(adapter);
4097 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4098
4099 return 0;
4100 }
4101
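/* Recover a Lancer function after a SLIPORT error: wait for the firmware
 * ready state, tear down and rebuild the function, and reopen the interface
 * if it was running.
 */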
4102 static int lancer_recover_func(struct be_adapter *adapter)
4103 {
4104 int status;
4105
4106 status = lancer_test_and_set_rdy_state(adapter);
4107 if (status)
4108 goto err;
4109
4110 if (netif_running(adapter->netdev))
4111 be_close(adapter->netdev);
4112
4113 be_clear(adapter);
4114
4115 adapter->hw_error = false;
4116 adapter->fw_timeout = false;
4117
4118 status = be_setup(adapter);
4119 if (status)
4120 goto err;
4121
4122 if (netif_running(adapter->netdev)) {
4123 status = be_open(adapter->netdev);
4124 if (status)
4125 goto err;
4126 }
4127
4128 	dev_info(&adapter->pdev->dev,
4129 		 "Adapter SLIPORT recovery succeeded\n");
4130 return 0;
4131 err:
4132 if (adapter->eeh_error)
4133 dev_err(&adapter->pdev->dev,
4134 "Adapter SLIPORT recovery failed\n");
4135
4136 return status;
4137 }
4138
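/* Delayed work, rescheduled every second: check for hardware errors and, on
 * Lancer chips, detach the netdev and attempt SLIPORT recovery.
 */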
4139 static void be_func_recovery_task(struct work_struct *work)
4140 {
4141 struct be_adapter *adapter =
4142 container_of(work, struct be_adapter, func_recovery_work.work);
4143 int status;
4144
4145 be_detect_error(adapter);
4146
4147 if (adapter->hw_error && lancer_chip(adapter)) {
4148
4149 if (adapter->eeh_error)
4150 goto out;
4151
4152 rtnl_lock();
4153 netif_device_detach(adapter->netdev);
4154 rtnl_unlock();
4155
4156 status = lancer_recover_func(adapter);
4157
4158 if (!status)
4159 netif_device_attach(adapter->netdev);
4160 }
4161
4162 out:
4163 schedule_delayed_work(&adapter->func_recovery_work,
4164 msecs_to_jiffies(1000));
4165 }
4166
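/* Periodic (1 second) housekeeping: issue the stats command, read the die
 * temperature, replenish RX rings that ran dry and adapt the EQ delay.
 */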
4167 static void be_worker(struct work_struct *work)
4168 {
4169 struct be_adapter *adapter =
4170 container_of(work, struct be_adapter, work.work);
4171 struct be_rx_obj *rxo;
4172 struct be_eq_obj *eqo;
4173 int i;
4174
4175 /* when interrupts are not yet enabled, just reap any pending
4176 * mcc completions */
4177 if (!netif_running(adapter->netdev)) {
4178 local_bh_disable();
4179 be_process_mcc(adapter);
4180 local_bh_enable();
4181 goto reschedule;
4182 }
4183
4184 if (!adapter->stats_cmd_sent) {
4185 if (lancer_chip(adapter))
4186 lancer_cmd_get_pport_stats(adapter,
4187 &adapter->stats_cmd);
4188 else
4189 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4190 }
4191
4192 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4193 be_cmd_get_die_temperature(adapter);
4194
4195 for_all_rx_queues(adapter, rxo, i) {
4196 if (rxo->rx_post_starved) {
4197 rxo->rx_post_starved = false;
4198 be_post_rx_frags(rxo, GFP_KERNEL);
4199 }
4200 }
4201
4202 for_all_evt_queues(adapter, eqo, i)
4203 be_eqd_update(adapter, eqo);
4204
4205 reschedule:
4206 adapter->work_counter++;
4207 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4208 }
4209
4210 static bool be_reset_required(struct be_adapter *adapter)
4211 {
4212 	return be_find_vfs(adapter, ENABLED) <= 0;
4213 }
4214
4215 static char *mc_name(struct be_adapter *adapter)
4216 {
4217 if (adapter->function_mode & FLEX10_MODE)
4218 return "FLEX10";
4219 else if (adapter->function_mode & VNIC_MODE)
4220 return "vNIC";
4221 else if (adapter->function_mode & UMC_ENABLED)
4222 return "UMC";
4223 else
4224 return "";
4225 }
4226
4227 static inline char *func_name(struct be_adapter *adapter)
4228 {
4229 return be_physfn(adapter) ? "PF" : "VF";
4230 }
4231
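/* PCI probe: enable the device, set the DMA mask, map BARs and create the
 * mailbox/stats buffers, reset and initialize the function, then register
 * the net_device and kick off the recovery worker.
 */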
4232 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4233 {
4234 int status = 0;
4235 struct be_adapter *adapter;
4236 struct net_device *netdev;
4237 char port_name;
4238
4239 status = pci_enable_device(pdev);
4240 if (status)
4241 goto do_none;
4242
4243 status = pci_request_regions(pdev, DRV_NAME);
4244 if (status)
4245 goto disable_dev;
4246 pci_set_master(pdev);
4247
4248 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4249 if (netdev == NULL) {
4250 status = -ENOMEM;
4251 goto rel_reg;
4252 }
4253 adapter = netdev_priv(netdev);
4254 adapter->pdev = pdev;
4255 pci_set_drvdata(pdev, adapter);
4256 adapter->netdev = netdev;
4257 SET_NETDEV_DEV(netdev, &pdev->dev);
4258
4259 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4260 if (!status) {
4261 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4262 if (status < 0) {
4263 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4264 goto free_netdev;
4265 }
4266 netdev->features |= NETIF_F_HIGHDMA;
4267 } else {
4268 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4269 if (status) {
4270 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4271 goto free_netdev;
4272 }
4273 }
4274
4275 status = pci_enable_pcie_error_reporting(pdev);
4276 if (status)
4277 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4278
4279 status = be_ctrl_init(adapter);
4280 if (status)
4281 goto free_netdev;
4282
4283 /* sync up with fw's ready state */
4284 if (be_physfn(adapter)) {
4285 status = be_fw_wait_ready(adapter);
4286 if (status)
4287 goto ctrl_clean;
4288 }
4289
4290 if (be_reset_required(adapter)) {
4291 status = be_cmd_reset_function(adapter);
4292 if (status)
4293 goto ctrl_clean;
4294
4295 /* Wait for interrupts to quiesce after an FLR */
4296 msleep(100);
4297 }
4298
4299 /* Allow interrupts for other ULPs running on NIC function */
4300 be_intr_set(adapter, true);
4301
4302 /* tell fw we're ready to fire cmds */
4303 status = be_cmd_fw_init(adapter);
4304 if (status)
4305 goto ctrl_clean;
4306
4307 status = be_stats_init(adapter);
4308 if (status)
4309 goto ctrl_clean;
4310
4311 status = be_get_initial_config(adapter);
4312 if (status)
4313 goto stats_clean;
4314
4315 INIT_DELAYED_WORK(&adapter->work, be_worker);
4316 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4317 adapter->rx_fc = adapter->tx_fc = true;
4318
4319 status = be_setup(adapter);
4320 if (status)
4321 goto stats_clean;
4322
4323 be_netdev_init(netdev);
4324 status = register_netdev(netdev);
4325 if (status != 0)
4326 goto unsetup;
4327
4328 be_roce_dev_add(adapter);
4329
4330 schedule_delayed_work(&adapter->func_recovery_work,
4331 msecs_to_jiffies(1000));
4332
4333 be_cmd_query_port_name(adapter, &port_name);
4334
4335 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4336 func_name(adapter), mc_name(adapter), port_name);
4337
4338 return 0;
4339
4340 unsetup:
4341 be_clear(adapter);
4342 stats_clean:
4343 be_stats_cleanup(adapter);
4344 ctrl_clean:
4345 be_ctrl_cleanup(adapter);
4346 free_netdev:
4347 free_netdev(netdev);
4348 pci_set_drvdata(pdev, NULL);
4349 rel_reg:
4350 pci_release_regions(pdev);
4351 disable_dev:
4352 pci_disable_device(pdev);
4353 do_none:
4354 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4355 return status;
4356 }
4357
4358 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4359 {
4360 struct be_adapter *adapter = pci_get_drvdata(pdev);
4361 struct net_device *netdev = adapter->netdev;
4362
4363 if (adapter->wol)
4364 be_setup_wol(adapter, true);
4365
4366 cancel_delayed_work_sync(&adapter->func_recovery_work);
4367
4368 netif_device_detach(netdev);
4369 if (netif_running(netdev)) {
4370 rtnl_lock();
4371 be_close(netdev);
4372 rtnl_unlock();
4373 }
4374 be_clear(adapter);
4375
4376 pci_save_state(pdev);
4377 pci_disable_device(pdev);
4378 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4379 return 0;
4380 }
4381
4382 static int be_resume(struct pci_dev *pdev)
4383 {
4384 int status = 0;
4385 struct be_adapter *adapter = pci_get_drvdata(pdev);
4386 struct net_device *netdev = adapter->netdev;
4387
4388 netif_device_detach(netdev);
4389
4390 status = pci_enable_device(pdev);
4391 if (status)
4392 return status;
4393
4394 pci_set_power_state(pdev, 0);
4395 pci_restore_state(pdev);
4396
4397 /* tell fw we're ready to fire cmds */
4398 status = be_cmd_fw_init(adapter);
4399 if (status)
4400 return status;
4401
4402 be_setup(adapter);
4403 if (netif_running(netdev)) {
4404 rtnl_lock();
4405 be_open(netdev);
4406 rtnl_unlock();
4407 }
4408
4409 schedule_delayed_work(&adapter->func_recovery_work,
4410 msecs_to_jiffies(1000));
4411 netif_device_attach(netdev);
4412
4413 if (adapter->wol)
4414 be_setup_wol(adapter, false);
4415
4416 return 0;
4417 }
4418
4419 /*
4420 * An FLR will stop BE from DMAing any data.
4421 */
4422 static void be_shutdown(struct pci_dev *pdev)
4423 {
4424 struct be_adapter *adapter = pci_get_drvdata(pdev);
4425
4426 if (!adapter)
4427 return;
4428
4429 cancel_delayed_work_sync(&adapter->work);
4430 cancel_delayed_work_sync(&adapter->func_recovery_work);
4431
4432 netif_device_detach(adapter->netdev);
4433
4434 be_cmd_reset_function(adapter);
4435
4436 pci_disable_device(pdev);
4437 }
4438
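/* PCI error (EEH/AER) handlers: quiesce the function when an error is
 * detected, re-enable the device and wait for firmware on slot reset, and
 * rebuild the function on resume.
 */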
4439 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4440 pci_channel_state_t state)
4441 {
4442 struct be_adapter *adapter = pci_get_drvdata(pdev);
4443 struct net_device *netdev = adapter->netdev;
4444
4445 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4446
4447 adapter->eeh_error = true;
4448
4449 cancel_delayed_work_sync(&adapter->func_recovery_work);
4450
4451 rtnl_lock();
4452 netif_device_detach(netdev);
4453 rtnl_unlock();
4454
4455 if (netif_running(netdev)) {
4456 rtnl_lock();
4457 be_close(netdev);
4458 rtnl_unlock();
4459 }
4460 be_clear(adapter);
4461
4462 if (state == pci_channel_io_perm_failure)
4463 return PCI_ERS_RESULT_DISCONNECT;
4464
4465 pci_disable_device(pdev);
4466
4467 /* The error could cause the FW to trigger a flash debug dump.
4468 * Resetting the card while flash dump is in progress
4469 * can cause it not to recover; wait for it to finish.
4470 * Wait only for first function as it is needed only once per
4471 * adapter.
4472 */
4473 if (pdev->devfn == 0)
4474 ssleep(30);
4475
4476 return PCI_ERS_RESULT_NEED_RESET;
4477 }
4478
4479 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4480 {
4481 struct be_adapter *adapter = pci_get_drvdata(pdev);
4482 int status;
4483
4484 dev_info(&adapter->pdev->dev, "EEH reset\n");
4485 be_clear_all_error(adapter);
4486
4487 status = pci_enable_device(pdev);
4488 if (status)
4489 return PCI_ERS_RESULT_DISCONNECT;
4490
4491 pci_set_master(pdev);
4492 pci_set_power_state(pdev, 0);
4493 pci_restore_state(pdev);
4494
4495 /* Check if card is ok and fw is ready */
4496 dev_info(&adapter->pdev->dev,
4497 "Waiting for FW to be ready after EEH reset\n");
4498 status = be_fw_wait_ready(adapter);
4499 if (status)
4500 return PCI_ERS_RESULT_DISCONNECT;
4501
4502 pci_cleanup_aer_uncorrect_error_status(pdev);
4503 return PCI_ERS_RESULT_RECOVERED;
4504 }
4505
4506 static void be_eeh_resume(struct pci_dev *pdev)
4507 {
4508 int status = 0;
4509 struct be_adapter *adapter = pci_get_drvdata(pdev);
4510 struct net_device *netdev = adapter->netdev;
4511
4512 dev_info(&adapter->pdev->dev, "EEH resume\n");
4513
4514 pci_save_state(pdev);
4515
4516 status = be_cmd_reset_function(adapter);
4517 if (status)
4518 goto err;
4519
4520 /* tell fw we're ready to fire cmds */
4521 status = be_cmd_fw_init(adapter);
4522 if (status)
4523 goto err;
4524
4525 status = be_setup(adapter);
4526 if (status)
4527 goto err;
4528
4529 if (netif_running(netdev)) {
4530 status = be_open(netdev);
4531 if (status)
4532 goto err;
4533 }
4534
4535 schedule_delayed_work(&adapter->func_recovery_work,
4536 msecs_to_jiffies(1000));
4537 netif_device_attach(netdev);
4538 return;
4539 err:
4540 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4541 }
4542
4543 static const struct pci_error_handlers be_eeh_handlers = {
4544 .error_detected = be_eeh_err_detected,
4545 .slot_reset = be_eeh_reset,
4546 .resume = be_eeh_resume,
4547 };
4548
4549 static struct pci_driver be_driver = {
4550 .name = DRV_NAME,
4551 .id_table = be_dev_ids,
4552 .probe = be_probe,
4553 .remove = be_remove,
4554 .suspend = be_suspend,
4555 .resume = be_resume,
4556 .shutdown = be_shutdown,
4557 .err_handler = &be_eeh_handlers
4558 };
4559
4560 static int __init be_init_module(void)
4561 {
4562 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4563 rx_frag_size != 2048) {
4564 printk(KERN_WARNING DRV_NAME
4565 " : Module param rx_frag_size must be 2048/4096/8192."
4566 " Using 2048\n");
4567 rx_frag_size = 2048;
4568 }
4569
4570 return pci_register_driver(&be_driver);
4571 }
4572 module_init(be_init_module);
4573
4574 static void __exit be_exit_module(void)
4575 {
4576 pci_unregister_driver(&be_driver);
4577 }
4578 module_exit(be_exit_module);