be2net: Avoid double insertion of vlan tags.
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
111 "NETC",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131 struct be_dma_mem *mem = &q->dma_mem;
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141 {
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va)
151 return -ENOMEM;
152 return 0;
153 }
154
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157 u32 reg, enabled;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176 int status = 0;
177
178 /* On Lancer, interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188 }
189
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196 wmb();
197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
202 {
203 u32 val = 0;
204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207 wmb();
208 iowrite32(val, adapter->db + txo->db_offset);
209 }
210
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212 bool arm, bool clear_int, u16 num_popped)
213 {
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219 if (adapter->eeh_error)
220 return;
221
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238 if (adapter->eeh_error)
239 return;
240
241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
252 u8 current_mac[ETH_ALEN];
253 u32 pmac_id = adapter->pmac_id[0];
254 bool active_mac = true;
255
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
259 /* For a BE VF, the MAC address is already activated by the PF.
 260 * Hence the only operation left is updating netdev->dev_addr.
 261 * Update it only if the user passes the same MAC that was used
 262 * when configuring the VF MAC from the PF (hypervisor).
 263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
287 if (status)
288 goto err;
289
290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293 done:
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296 err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
298 return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334 struct be_port_rxf_stats_v0 *port_stats =
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
337
338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
366 drvs->jabber_events = rxf_stats->port1_jabber_events;
367 else
368 drvs->jabber_events = rxf_stats->port0_jabber_events;
369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383 struct be_port_rxf_stats_v1 *port_stats =
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
386
387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422 }
423
424 static void populate_lancer_stats(struct be_adapter *adapter)
425 {
426
427 struct be_drv_stats *drvs = &adapter->drv_stats;
428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456 drvs->jabber_events = pport_stats->rx_jabbers;
457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459 drvs->rx_drops_too_many_frags =
460 pport_stats->rx_drops_too_many_frags_lo;
461 }
462
463 static void accumulate_16bit_val(u32 *acc, u16 val)
464 {
465 #define lo(x) (x & 0xFFFF)
466 #define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473 }
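The accumulator above keeps the current 16-bit HW reading in the low half of *acc and the number of wraps in the high half. A minimal user-space sketch of the same arithmetic (the helper name and sample values below are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Same idea as accumulate_16bit_val(): if the new 16-bit HW reading is
 * smaller than the previous low half, the counter wrapped past 65535. */
static void accumulate_16bit(uint32_t *acc, uint16_t val)
{
	uint32_t newacc = (*acc & 0xFFFF0000u) + val;

	if (val < (uint16_t)(*acc & 0xFFFFu))
		newacc += 65536;
	*acc = newacc;
}

int main(void)
{
	uint32_t acc = 0;

	accumulate_16bit(&acc, 65000);	/* acc == 65000 */
	accumulate_16bit(&acc, 100);	/* wrapped: acc == 65536 + 100 */
	printf("%u\n", acc);		/* prints 65636 */
	return 0;
}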
474
475 void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478 {
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* The erx HW counter below can wrap around after
 483 * 65535. The driver accumulates it into a 32-bit value.
 484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487 }
488
489 void be_parse_stats(struct be_adapter *adapter)
490 {
491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
494 u32 erx_stat;
495
496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
498 } else {
499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
504
505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
509 }
510 }
511 }
512
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
515 {
516 struct be_adapter *adapter = netdev_priv(netdev);
517 struct be_drv_stats *drvs = &adapter->drv_stats;
518 struct be_rx_obj *rxo;
519 struct be_tx_obj *txo;
520 u64 pkts, bytes;
521 unsigned int start;
522 int i;
523
524 for_all_rx_queues(adapter, rxo, i) {
525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
536 }
537
538 for_all_tx_queues(adapter, txo, i) {
539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
547 }
548
549 /* bad pkts received */
550 stats->rx_errors = drvs->rx_crc_errors +
551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
559 drvs->rx_dropped_runt;
560
561 /* detailed rx errors */
562 stats->rx_length_errors = drvs->rx_in_range_errors +
563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
565
566 stats->rx_crc_errors = drvs->rx_crc_errors;
567
568 /* frame alignment errors */
569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
570
571 /* receiver fifo overrun */
572 /* drops_no_pbuf is not per i/f, it's per BE card */
573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
576 return stats;
577 }
578
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
580 {
581 struct net_device *netdev = adapter->netdev;
582
583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584 netif_carrier_off(netdev);
585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
586 }
587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
592 }
593
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
596 {
597 struct be_tx_stats *stats = tx_stats(txo);
598
599 u64_stats_update_begin(&stats->sync);
600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
604 if (stopped)
605 stats->tx_stops++;
606 u64_stats_update_end(&stats->sync);
607 }
608
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
612 {
613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
617 /* to account for hdr wrb */
618 cnt++;
619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
625 }
626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628 }
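A worked example of the count above (hypothetical skb): linear data contributes 1 WRB, three page fragments contribute 3 more, and the header WRB brings the total to 5; on BE chips (not Lancer) the odd count is padded with one dummy WRB to 6, matching the "make it an even num" comment, and the result must stay within BE_MAX_TX_FRAG_COUNT.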
629
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631 {
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
635 wrb->rsvd0 = 0;
636 }
637
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640 {
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652 }
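A small user-space sketch of the priority remap above, assuming the standard 802.1Q tag layout (PCP in bits 15-13, i.e. VLAN_PRIO_MASK 0xE000 and VLAN_PRIO_SHIFT 13) and a recommended_prio value that is already shifted into position, as the driver's bitwise OR implies; the helper name and sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK	0xE000
#define VLAN_PRIO_SHIFT	13

/* Rewrite the PCP bits when the OS-supplied priority is not in the bitmap. */
static uint16_t remap_tx_vlan_tag(uint16_t vlan_tag, uint8_t prio_bmap,
				  uint16_t recommended_prio)
{
	uint8_t vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	if (!(prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | recommended_prio;
	return vlan_tag;
}

int main(void)
{
	/* Tag 0x6005 carries priority 3, VID 5. The bitmap allows only
	 * priority 0, so the tag is rewritten as 0x0005. */
	printf("0x%04x\n", remap_tx_vlan_tag(0x6005, 0x01, 0 << VLAN_PRIO_SHIFT));
	return 0;
}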
653
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
656 {
657 u16 vlan_tag;
658
659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
663 if (skb_is_gso(skb)) {
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
676 if (vlan_tx_tag_present(skb)) {
677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
680 }
681
682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687 }
688
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690 bool unmap_single)
691 {
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697 if (wrb->frag_len) {
698 if (unmap_single)
699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
701 else
702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703 }
704 }
705
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
709 {
710 dma_addr_t busaddr;
711 int i, copied = 0;
712 struct device *dev = &adapter->pdev->dev;
713 struct sk_buff *first_skb = skb;
714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
716 bool map_single = false;
717 u16 map_head;
718
719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
721 map_head = txq->head;
722
723 if (skb->len > skb->data_len) {
724 int len = skb_headlen(skb);
725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
727 goto dma_err;
728 map_single = true;
729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
735
736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737 const struct skb_frag_struct *frag =
738 &skb_shinfo(skb)->frags[i];
739 busaddr = skb_frag_dma_map(dev, frag, 0,
740 skb_frag_size(frag), DMA_TO_DEVICE);
741 if (dma_mapping_error(dev, busaddr))
742 goto dma_err;
743 wrb = queue_head_node(txq);
744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
747 copied += skb_frag_size(frag);
748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
761 dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
765 unmap_tx_frag(dev, wrb, map_single);
766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
771 }
772
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
776 {
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
783 if (vlan_tx_tag_present(skb))
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
786 vlan_tag = adapter->pvid;
787
788 if (vlan_tag) {
789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
790 if (unlikely(!skb))
791 return skb;
792 skb->vlan_tci = 0;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
796
797 /* Insert the outer VLAN, if any */
798 if (adapter->qnq_vid) {
799 vlan_tag = adapter->qnq_vid;
800 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
801 if (unlikely(!skb))
802 return skb;
803 if (skip_hw_vlan)
804 *skip_hw_vlan = true;
805 }
806
807 return skb;
808 }
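This helper is where double insertion of the VLAN tag is avoided: once __vlan_put_tag() has written the tag into the frame data, skb->vlan_tci is cleared and *skip_hw_vlan is set, so wrb_fill_hdr() no longer sees vlan_tx_tag_present() and the hardware is not asked to insert the same tag again (see the "To skip HW VLAN tagging: evt = 1, compl = 0" comment above). The same flag also covers the outer qnq_vid tag in QinQ configurations.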
809
810 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811 {
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830 }
831
832 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833 {
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835 }
836
837 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
838 {
839 return BE3_chip(adapter) &&
840 be_ipv6_exthdr_check(skb);
841 }
842
843 static netdev_tx_t be_xmit(struct sk_buff *skb,
844 struct net_device *netdev)
845 {
846 struct be_adapter *adapter = netdev_priv(netdev);
847 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
848 struct be_queue_info *txq = &txo->q;
849 struct iphdr *ip = NULL;
850 u32 wrb_cnt = 0, copied = 0;
851 u32 start = txq->head, eth_hdr_len;
852 bool dummy_wrb, stopped = false;
853 bool skip_hw_vlan = false;
854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
855
856 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
857 VLAN_ETH_HLEN : ETH_HLEN;
858
859 /* For padded packets, BE HW modifies tot_len field in IP header
860 * incorrectly when a VLAN tag is inserted by HW.
861 */
862 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
863 ip = (struct iphdr *)ip_hdr(skb);
864 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
865 }
866
867 /* If vlan tag is already inlined in the packet, skip HW VLAN
868 * tagging in UMC mode
869 */
870 if ((adapter->function_mode & UMC_ENABLED) &&
871 veh->h_vlan_proto == htons(ETH_P_8021Q))
872 skip_hw_vlan = true;
873
874 /* HW has a bug wherein it will calculate CSUM for VLAN
875 * pkts even though checksum offload is not requested.
 876 * Manually insert the VLAN tag in the pkt.
877 */
878 if (skb->ip_summed != CHECKSUM_PARTIAL &&
879 vlan_tx_tag_present(skb)) {
880 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
881 if (unlikely(!skb))
882 goto tx_drop;
883 }
884
885 /* HW may lock up when VLAN HW tagging is requested on
886 * certain ipv6 packets. Drop such pkts if the HW workaround to
887 * skip HW tagging is not enabled by FW.
888 */
889 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
890 (adapter->pvid || adapter->qnq_vid) &&
891 !qnq_async_evt_rcvd(adapter)))
892 goto tx_drop;
893
894 /* Manual VLAN tag insertion to prevent an ASIC lockup
 895 * when the ASIC inserts a VLAN tag into certain ipv6
 896 * packets. Insert the VLAN tag in the driver and set the
 897 * event, completion and vlan bits accordingly in the
 898 * Tx WRB.
 899 */
900 if (be_ipv6_tx_stall_chk(adapter, skb) &&
901 be_vlan_tag_tx_chk(adapter, skb)) {
902 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
903 if (unlikely(!skb))
904 goto tx_drop;
905 }
906
907 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
908
909 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
910 skip_hw_vlan);
911 if (copied) {
912 int gso_segs = skb_shinfo(skb)->gso_segs;
913
914 /* record the sent skb in the sent_skb table */
915 BUG_ON(txo->sent_skb_list[start]);
916 txo->sent_skb_list[start] = skb;
917
918 /* Ensure txq has space for the next skb; Else stop the queue
919 * *BEFORE* ringing the tx doorbell, so that we serialize the
920 * tx compls of the current transmit which'll wake up the queue
921 */
922 atomic_add(wrb_cnt, &txq->used);
923 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
924 txq->len) {
925 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
926 stopped = true;
927 }
928
929 be_txq_notify(adapter, txo, wrb_cnt);
930
931 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
932 } else {
933 txq->head = start;
934 dev_kfree_skb_any(skb);
935 }
936 tx_drop:
937 return NETDEV_TX_OK;
938 }
939
940 static int be_change_mtu(struct net_device *netdev, int new_mtu)
941 {
942 struct be_adapter *adapter = netdev_priv(netdev);
943 if (new_mtu < BE_MIN_MTU ||
944 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
945 (ETH_HLEN + ETH_FCS_LEN))) {
946 dev_info(&adapter->pdev->dev,
947 "MTU must be between %d and %d bytes\n",
948 BE_MIN_MTU,
949 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
950 return -EINVAL;
951 }
952 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
953 netdev->mtu, new_mtu);
954 netdev->mtu = new_mtu;
955 return 0;
956 }
957
958 /*
959 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
960 * If the user configures more, place BE in vlan promiscuous mode.
961 */
962 static int be_vid_config(struct be_adapter *adapter)
963 {
964 u16 vids[BE_NUM_VLANS_SUPPORTED];
965 u16 num = 0, i;
966 int status = 0;
967
968 /* No need to further configure vids if in promiscuous mode */
969 if (adapter->promiscuous)
970 return 0;
971
972 if (adapter->vlans_added > adapter->max_vlans)
973 goto set_vlan_promisc;
974
975 /* Construct VLAN Table to give to HW */
976 for (i = 0; i < VLAN_N_VID; i++)
977 if (adapter->vlan_tag[i])
978 vids[num++] = cpu_to_le16(i);
979
980 status = be_cmd_vlan_config(adapter, adapter->if_handle,
981 vids, num, 1, 0);
982
983 /* Set to VLAN promisc mode as setting VLAN filter failed */
984 if (status) {
985 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
986 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
987 goto set_vlan_promisc;
988 }
989
990 return status;
991
992 set_vlan_promisc:
993 status = be_cmd_vlan_config(adapter, adapter->if_handle,
994 NULL, 0, 1, 1);
995 return status;
996 }
997
998 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
999 {
1000 struct be_adapter *adapter = netdev_priv(netdev);
1001 int status = 0;
1002
1003 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1004 status = -EINVAL;
1005 goto ret;
1006 }
1007
1008 /* Packets with VID 0 are always received by Lancer by default */
1009 if (lancer_chip(adapter) && vid == 0)
1010 goto ret;
1011
1012 adapter->vlan_tag[vid] = 1;
1013 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1014 status = be_vid_config(adapter);
1015
1016 if (!status)
1017 adapter->vlans_added++;
1018 else
1019 adapter->vlan_tag[vid] = 0;
1020 ret:
1021 return status;
1022 }
1023
1024 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1025 {
1026 struct be_adapter *adapter = netdev_priv(netdev);
1027 int status = 0;
1028
1029 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1030 status = -EINVAL;
1031 goto ret;
1032 }
1033
1034 /* Packets with VID 0 are always received by Lancer by default */
1035 if (lancer_chip(adapter) && vid == 0)
1036 goto ret;
1037
1038 adapter->vlan_tag[vid] = 0;
1039 if (adapter->vlans_added <= adapter->max_vlans)
1040 status = be_vid_config(adapter);
1041
1042 if (!status)
1043 adapter->vlans_added--;
1044 else
1045 adapter->vlan_tag[vid] = 1;
1046 ret:
1047 return status;
1048 }
1049
1050 static void be_set_rx_mode(struct net_device *netdev)
1051 {
1052 struct be_adapter *adapter = netdev_priv(netdev);
1053 int status;
1054
1055 if (netdev->flags & IFF_PROMISC) {
1056 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1057 adapter->promiscuous = true;
1058 goto done;
1059 }
1060
1061 /* BE was previously in promiscuous mode; disable it */
1062 if (adapter->promiscuous) {
1063 adapter->promiscuous = false;
1064 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1065
1066 if (adapter->vlans_added)
1067 be_vid_config(adapter);
1068 }
1069
1070 /* Enable multicast promisc if num configured exceeds what we support */
1071 if (netdev->flags & IFF_ALLMULTI ||
1072 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1073 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1074 goto done;
1075 }
1076
1077 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1078 struct netdev_hw_addr *ha;
1079 int i = 1; /* First slot is claimed by the Primary MAC */
1080
1081 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1082 be_cmd_pmac_del(adapter, adapter->if_handle,
1083 adapter->pmac_id[i], 0);
1084 }
1085
1086 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1087 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1088 adapter->promiscuous = true;
1089 goto done;
1090 }
1091
1092 netdev_for_each_uc_addr(ha, adapter->netdev) {
1093 adapter->uc_macs++; /* First slot is for Primary MAC */
1094 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1095 adapter->if_handle,
1096 &adapter->pmac_id[adapter->uc_macs], 0);
1097 }
1098 }
1099
1100 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1101
1102 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1103 if (status) {
1104 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1105 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1106 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1107 }
1108 done:
1109 return;
1110 }
1111
1112 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1113 {
1114 struct be_adapter *adapter = netdev_priv(netdev);
1115 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1116 int status;
1117 bool active_mac = false;
1118 u32 pmac_id;
1119 u8 old_mac[ETH_ALEN];
1120
1121 if (!sriov_enabled(adapter))
1122 return -EPERM;
1123
1124 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1125 return -EINVAL;
1126
1127 if (lancer_chip(adapter)) {
1128 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1129 &pmac_id, vf + 1);
1130 if (!status && active_mac)
1131 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1132 pmac_id, vf + 1);
1133
1134 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1135 } else {
1136 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1137 vf_cfg->pmac_id, vf + 1);
1138
1139 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1140 &vf_cfg->pmac_id, vf + 1);
1141 }
1142
1143 if (status)
1144 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1145 mac, vf);
1146 else
1147 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1148
1149 return status;
1150 }
1151
1152 static int be_get_vf_config(struct net_device *netdev, int vf,
1153 struct ifla_vf_info *vi)
1154 {
1155 struct be_adapter *adapter = netdev_priv(netdev);
1156 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1157
1158 if (!sriov_enabled(adapter))
1159 return -EPERM;
1160
1161 if (vf >= adapter->num_vfs)
1162 return -EINVAL;
1163
1164 vi->vf = vf;
1165 vi->tx_rate = vf_cfg->tx_rate;
1166 vi->vlan = vf_cfg->vlan_tag;
1167 vi->qos = 0;
1168 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1169
1170 return 0;
1171 }
1172
1173 static int be_set_vf_vlan(struct net_device *netdev,
1174 int vf, u16 vlan, u8 qos)
1175 {
1176 struct be_adapter *adapter = netdev_priv(netdev);
1177 int status = 0;
1178
1179 if (!sriov_enabled(adapter))
1180 return -EPERM;
1181
1182 if (vf >= adapter->num_vfs || vlan > 4095)
1183 return -EINVAL;
1184
1185 if (vlan) {
1186 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1187 /* If this is a new value, program it; else skip. */
1188 adapter->vf_cfg[vf].vlan_tag = vlan;
1189
1190 status = be_cmd_set_hsw_config(adapter, vlan,
1191 vf + 1, adapter->vf_cfg[vf].if_handle);
1192 }
1193 } else {
1194 /* Reset Transparent Vlan Tagging. */
1195 adapter->vf_cfg[vf].vlan_tag = 0;
1196 vlan = adapter->vf_cfg[vf].def_vid;
1197 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1198 adapter->vf_cfg[vf].if_handle);
1199 }
1200
1201
1202 if (status)
1203 dev_info(&adapter->pdev->dev,
1204 "VLAN %d config on VF %d failed\n", vlan, vf);
1205 return status;
1206 }
1207
1208 static int be_set_vf_tx_rate(struct net_device *netdev,
1209 int vf, int rate)
1210 {
1211 struct be_adapter *adapter = netdev_priv(netdev);
1212 int status = 0;
1213
1214 if (!sriov_enabled(adapter))
1215 return -EPERM;
1216
1217 if (vf >= adapter->num_vfs)
1218 return -EINVAL;
1219
1220 if (rate < 100 || rate > 10000) {
1221 dev_err(&adapter->pdev->dev,
1222 "tx rate must be between 100 and 10000 Mbps\n");
1223 return -EINVAL;
1224 }
1225
1226 if (lancer_chip(adapter))
1227 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1228 else
1229 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1230
1231 if (status)
1232 dev_err(&adapter->pdev->dev,
1233 "tx rate %d on VF %d failed\n", rate, vf);
1234 else
1235 adapter->vf_cfg[vf].tx_rate = rate;
1236 return status;
1237 }
1238
1239 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1240 {
1241 struct pci_dev *dev, *pdev = adapter->pdev;
1242 int vfs = 0, assigned_vfs = 0, pos;
1243 u16 offset, stride;
1244
1245 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1246 if (!pos)
1247 return 0;
1248 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1249 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1250
1251 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1252 while (dev) {
1253 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1254 vfs++;
1255 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1256 assigned_vfs++;
1257 }
1258 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1259 }
1260 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1261 }
1262
1263 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1264 {
1265 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1266 ulong now = jiffies;
1267 ulong delta = now - stats->rx_jiffies;
1268 u64 pkts;
1269 unsigned int start, eqd;
1270
1271 if (!eqo->enable_aic) {
1272 eqd = eqo->eqd;
1273 goto modify_eqd;
1274 }
1275
1276 if (eqo->idx >= adapter->num_rx_qs)
1277 return;
1278
1279 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1280
1281 /* Wrapped around */
1282 if (time_before(now, stats->rx_jiffies)) {
1283 stats->rx_jiffies = now;
1284 return;
1285 }
1286
1287 /* Update once a second */
1288 if (delta < HZ)
1289 return;
1290
1291 do {
1292 start = u64_stats_fetch_begin_bh(&stats->sync);
1293 pkts = stats->rx_pkts;
1294 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1295
1296 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1297 stats->rx_pkts_prev = pkts;
1298 stats->rx_jiffies = now;
1299 eqd = (stats->rx_pps / 110000) << 3;
1300 eqd = min(eqd, eqo->max_eqd);
1301 eqd = max(eqd, eqo->min_eqd);
1302 if (eqd < 10)
1303 eqd = 0;
1304
1305 modify_eqd:
1306 if (eqd != eqo->cur_eqd) {
1307 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1308 eqo->cur_eqd = eqd;
1309 }
1310 }
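A worked example of the adaptive coalescing arithmetic above (hypothetical rate): at 440,000 pkts/s, eqd = (440000 / 110000) << 3 = 32, which is then clamped to the [min_eqd, max_eqd] range; any result below 10 is forced to 0, so lightly loaded queues run with no interrupt delay at all.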
1311
1312 static void be_rx_stats_update(struct be_rx_obj *rxo,
1313 struct be_rx_compl_info *rxcp)
1314 {
1315 struct be_rx_stats *stats = rx_stats(rxo);
1316
1317 u64_stats_update_begin(&stats->sync);
1318 stats->rx_compl++;
1319 stats->rx_bytes += rxcp->pkt_size;
1320 stats->rx_pkts++;
1321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1322 stats->rx_mcast_pkts++;
1323 if (rxcp->err)
1324 stats->rx_compl_err++;
1325 u64_stats_update_end(&stats->sync);
1326 }
1327
1328 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1329 {
1330 /* L4 checksum is not reliable for non-TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
1332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
1334 }
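Concretely: a TCP or UDP completion with l4_csum set passes only if ip_csum is also set or the packet is IPv6; a non-TCP/UDP frame never passes. The ip_csum bit is ignored for IPv6 because IPv6 carries no header checksum.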
1335
1336 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1337 u16 frag_idx)
1338 {
1339 struct be_adapter *adapter = rxo->adapter;
1340 struct be_rx_page_info *rx_page_info;
1341 struct be_queue_info *rxq = &rxo->q;
1342
1343 rx_page_info = &rxo->page_info_tbl[frag_idx];
1344 BUG_ON(!rx_page_info->page);
1345
1346 if (rx_page_info->last_page_user) {
1347 dma_unmap_page(&adapter->pdev->dev,
1348 dma_unmap_addr(rx_page_info, bus),
1349 adapter->big_page_size, DMA_FROM_DEVICE);
1350 rx_page_info->last_page_user = false;
1351 }
1352
1353 atomic_dec(&rxq->used);
1354 return rx_page_info;
1355 }
1356
1357 /* Throw away the data in the Rx completion */
1358 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
1360 {
1361 struct be_queue_info *rxq = &rxo->q;
1362 struct be_rx_page_info *page_info;
1363 u16 i, num_rcvd = rxcp->num_rcvd;
1364
1365 for (i = 0; i < num_rcvd; i++) {
1366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
1369 index_inc(&rxcp->rxq_idx, rxq->len);
1370 }
1371 }
1372
1373 /*
1374 * skb_fill_rx_data forms a complete skb for an ether frame
1375 * indicated by rxcp.
1376 */
1377 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378 struct be_rx_compl_info *rxcp)
1379 {
1380 struct be_queue_info *rxq = &rxo->q;
1381 struct be_rx_page_info *page_info;
1382 u16 i, j;
1383 u16 hdr_len, curr_frag_len, remaining;
1384 u8 *start;
1385
1386 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1387 start = page_address(page_info->page) + page_info->page_offset;
1388 prefetch(start);
1389
1390 /* Copy data in the first descriptor of this completion */
1391 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1392
1393 skb->len = curr_frag_len;
1394 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1395 memcpy(skb->data, start, curr_frag_len);
1396 /* Complete packet has now been moved to data */
1397 put_page(page_info->page);
1398 skb->data_len = 0;
1399 skb->tail += curr_frag_len;
1400 } else {
1401 hdr_len = ETH_HLEN;
1402 memcpy(skb->data, start, hdr_len);
1403 skb_shinfo(skb)->nr_frags = 1;
1404 skb_frag_set_page(skb, 0, page_info->page);
1405 skb_shinfo(skb)->frags[0].page_offset =
1406 page_info->page_offset + hdr_len;
1407 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1408 skb->data_len = curr_frag_len - hdr_len;
1409 skb->truesize += rx_frag_size;
1410 skb->tail += hdr_len;
1411 }
1412 page_info->page = NULL;
1413
1414 if (rxcp->pkt_size <= rx_frag_size) {
1415 BUG_ON(rxcp->num_rcvd != 1);
1416 return;
1417 }
1418
1419 /* More frags present for this completion */
1420 index_inc(&rxcp->rxq_idx, rxq->len);
1421 remaining = rxcp->pkt_size - curr_frag_len;
1422 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1423 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1424 curr_frag_len = min(remaining, rx_frag_size);
1425
1426 /* Coalesce all frags from the same physical page in one slot */
1427 if (page_info->page_offset == 0) {
1428 /* Fresh page */
1429 j++;
1430 skb_frag_set_page(skb, j, page_info->page);
1431 skb_shinfo(skb)->frags[j].page_offset =
1432 page_info->page_offset;
1433 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1434 skb_shinfo(skb)->nr_frags++;
1435 } else {
1436 put_page(page_info->page);
1437 }
1438
1439 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1440 skb->len += curr_frag_len;
1441 skb->data_len += curr_frag_len;
1442 skb->truesize += rx_frag_size;
1443 remaining -= curr_frag_len;
1444 index_inc(&rxcp->rxq_idx, rxq->len);
1445 page_info->page = NULL;
1446 }
1447 BUG_ON(j > MAX_SKB_FRAGS);
1448 }
1449
1450 /* Process the RX completion indicated by rxcp when GRO is disabled */
1451 static void be_rx_compl_process(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
1453 {
1454 struct be_adapter *adapter = rxo->adapter;
1455 struct net_device *netdev = adapter->netdev;
1456 struct sk_buff *skb;
1457
1458 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1459 if (unlikely(!skb)) {
1460 rx_stats(rxo)->rx_drops_no_skbs++;
1461 be_rx_compl_discard(rxo, rxcp);
1462 return;
1463 }
1464
1465 skb_fill_rx_data(rxo, skb, rxcp);
1466
1467 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1468 skb->ip_summed = CHECKSUM_UNNECESSARY;
1469 else
1470 skb_checksum_none_assert(skb);
1471
1472 skb->protocol = eth_type_trans(skb, netdev);
1473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1474 if (netdev->features & NETIF_F_RXHASH)
1475 skb->rxhash = rxcp->rss_hash;
1476
1477
1478 if (rxcp->vlanf)
1479 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1480
1481 netif_receive_skb(skb);
1482 }
1483
1484 /* Process the RX completion indicated by rxcp when GRO is enabled */
1485 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1486 struct be_rx_compl_info *rxcp)
1487 {
1488 struct be_adapter *adapter = rxo->adapter;
1489 struct be_rx_page_info *page_info;
1490 struct sk_buff *skb = NULL;
1491 struct be_queue_info *rxq = &rxo->q;
1492 u16 remaining, curr_frag_len;
1493 u16 i, j;
1494
1495 skb = napi_get_frags(napi);
1496 if (!skb) {
1497 be_rx_compl_discard(rxo, rxcp);
1498 return;
1499 }
1500
1501 remaining = rxcp->pkt_size;
1502 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1503 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1504
1505 curr_frag_len = min(remaining, rx_frag_size);
1506
1507 /* Coalesce all frags from the same physical page in one slot */
1508 if (i == 0 || page_info->page_offset == 0) {
1509 /* First frag or Fresh page */
1510 j++;
1511 skb_frag_set_page(skb, j, page_info->page);
1512 skb_shinfo(skb)->frags[j].page_offset =
1513 page_info->page_offset;
1514 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1515 } else {
1516 put_page(page_info->page);
1517 }
1518 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1519 skb->truesize += rx_frag_size;
1520 remaining -= curr_frag_len;
1521 index_inc(&rxcp->rxq_idx, rxq->len);
1522 memset(page_info, 0, sizeof(*page_info));
1523 }
1524 BUG_ON(j > MAX_SKB_FRAGS);
1525
1526 skb_shinfo(skb)->nr_frags = j + 1;
1527 skb->len = rxcp->pkt_size;
1528 skb->data_len = rxcp->pkt_size;
1529 skb->ip_summed = CHECKSUM_UNNECESSARY;
1530 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1531 if (adapter->netdev->features & NETIF_F_RXHASH)
1532 skb->rxhash = rxcp->rss_hash;
1533
1534 if (rxcp->vlanf)
1535 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1536
1537 napi_gro_frags(napi);
1538 }
1539
1540 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541 struct be_rx_compl_info *rxcp)
1542 {
1543 rxcp->pkt_size =
1544 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1548 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1549 rxcp->ip_csum =
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1551 rxcp->l4_csum =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1553 rxcp->ipv6 =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1555 rxcp->rxq_idx =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1557 rxcp->num_rcvd =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1559 rxcp->pkt_type =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1561 rxcp->rss_hash =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1563 if (rxcp->vlanf) {
1564 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1565 compl);
1566 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1567 compl);
1568 }
1569 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1570 }
1571
1572 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573 struct be_rx_compl_info *rxcp)
1574 {
1575 rxcp->pkt_size =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1580 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1581 rxcp->ip_csum =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1583 rxcp->l4_csum =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1585 rxcp->ipv6 =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1587 rxcp->rxq_idx =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1589 rxcp->num_rcvd =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1591 rxcp->pkt_type =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1593 rxcp->rss_hash =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1595 if (rxcp->vlanf) {
1596 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1597 compl);
1598 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1599 compl);
1600 }
1601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1602 }
1603
1604 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1605 {
1606 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1607 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1608 struct be_adapter *adapter = rxo->adapter;
1609
1610 /* For checking the valid bit it is Ok to use either definition as the
1611 * valid bit is at the same position in both v0 and v1 Rx compl */
1612 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1613 return NULL;
1614
1615 rmb();
1616 be_dws_le_to_cpu(compl, sizeof(*compl));
1617
1618 if (adapter->be3_native)
1619 be_parse_rx_compl_v1(compl, rxcp);
1620 else
1621 be_parse_rx_compl_v0(compl, rxcp);
1622
1623 if (rxcp->vlanf) {
1624 /* vlanf could be wrongly set in some cards.
1625 * ignore if vtm is not set */
1626 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1627 rxcp->vlanf = 0;
1628
1629 if (!lancer_chip(adapter))
1630 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1631
1632 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1633 !adapter->vlan_tag[rxcp->vlan_tag])
1634 rxcp->vlanf = 0;
1635 }
1636
1637 /* As the compl has been parsed, reset it; we won't touch it again */
1638 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1639
1640 queue_tail_inc(&rxo->cq);
1641 return rxcp;
1642 }
1643
1644 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1645 {
1646 u32 order = get_order(size);
1647
1648 if (order > 0)
1649 gfp |= __GFP_COMP;
1650 return alloc_pages(gfp, order);
1651 }
1652
1653 /*
1654 * Allocate a page, split it into fragments of size rx_frag_size and post as
1655 * receive buffers to BE
1656 */
1657 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1658 {
1659 struct be_adapter *adapter = rxo->adapter;
1660 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1661 struct be_queue_info *rxq = &rxo->q;
1662 struct page *pagep = NULL;
1663 struct be_eth_rx_d *rxd;
1664 u64 page_dmaaddr = 0, frag_dmaaddr;
1665 u32 posted, page_offset = 0;
1666
1667 page_info = &rxo->page_info_tbl[rxq->head];
1668 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1669 if (!pagep) {
1670 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1671 if (unlikely(!pagep)) {
1672 rx_stats(rxo)->rx_post_fail++;
1673 break;
1674 }
1675 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1676 0, adapter->big_page_size,
1677 DMA_FROM_DEVICE);
1678 page_info->page_offset = 0;
1679 } else {
1680 get_page(pagep);
1681 page_info->page_offset = page_offset + rx_frag_size;
1682 }
1683 page_offset = page_info->page_offset;
1684 page_info->page = pagep;
1685 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1686 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1687
1688 rxd = queue_head_node(rxq);
1689 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1690 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1691
1692 /* Any space left in the current big page for another frag? */
1693 if ((page_offset + rx_frag_size + rx_frag_size) >
1694 adapter->big_page_size) {
1695 pagep = NULL;
1696 page_info->last_page_user = true;
1697 }
1698
1699 prev_page_info = page_info;
1700 queue_head_inc(rxq);
1701 page_info = &rxo->page_info_tbl[rxq->head];
1702 }
1703 if (pagep)
1704 prev_page_info->last_page_user = true;
1705
1706 if (posted) {
1707 atomic_add(posted, &rxq->used);
1708 be_rxq_notify(adapter, rxq->id, posted);
1709 } else if (atomic_read(&rxq->used) == 0) {
1710 /* Let be_worker replenish when memory is available */
1711 rxo->rx_post_starved = true;
1712 }
1713 }
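For a sense of scale (assuming, purely for illustration, a 16 KB big_page_size; the real value is configured elsewhere in the driver): with the default rx_frag_size of 2048 each compound page is carved into 8 receive fragments, and only the fragment marked last_page_user triggers the dma_unmap_page() when buffers are reclaimed in get_rx_page_info().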
1714
1715 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1716 {
1717 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1718
1719 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1720 return NULL;
1721
1722 rmb();
1723 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1724
1725 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1726
1727 queue_tail_inc(tx_cq);
1728 return txcp;
1729 }
1730
1731 static u16 be_tx_compl_process(struct be_adapter *adapter,
1732 struct be_tx_obj *txo, u16 last_index)
1733 {
1734 struct be_queue_info *txq = &txo->q;
1735 struct be_eth_wrb *wrb;
1736 struct sk_buff **sent_skbs = txo->sent_skb_list;
1737 struct sk_buff *sent_skb;
1738 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1739 bool unmap_skb_hdr = true;
1740
1741 sent_skb = sent_skbs[txq->tail];
1742 BUG_ON(!sent_skb);
1743 sent_skbs[txq->tail] = NULL;
1744
1745 /* skip header wrb */
1746 queue_tail_inc(txq);
1747
1748 do {
1749 cur_index = txq->tail;
1750 wrb = queue_tail_node(txq);
1751 unmap_tx_frag(&adapter->pdev->dev, wrb,
1752 (unmap_skb_hdr && skb_headlen(sent_skb)));
1753 unmap_skb_hdr = false;
1754
1755 num_wrbs++;
1756 queue_tail_inc(txq);
1757 } while (cur_index != last_index);
1758
1759 kfree_skb(sent_skb);
1760 return num_wrbs;
1761 }
1762
1763 /* Return the number of events in the event queue */
1764 static inline int events_get(struct be_eq_obj *eqo)
1765 {
1766 struct be_eq_entry *eqe;
1767 int num = 0;
1768
1769 do {
1770 eqe = queue_tail_node(&eqo->q);
1771 if (eqe->evt == 0)
1772 break;
1773
1774 rmb();
1775 eqe->evt = 0;
1776 num++;
1777 queue_tail_inc(&eqo->q);
1778 } while (true);
1779
1780 return num;
1781 }
1782
1783 /* Leaves the EQ in a disarmed state */
1784 static void be_eq_clean(struct be_eq_obj *eqo)
1785 {
1786 int num = events_get(eqo);
1787
1788 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1789 }
1790
1791 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1792 {
1793 struct be_rx_page_info *page_info;
1794 struct be_queue_info *rxq = &rxo->q;
1795 struct be_queue_info *rx_cq = &rxo->cq;
1796 struct be_rx_compl_info *rxcp;
1797 struct be_adapter *adapter = rxo->adapter;
1798 int flush_wait = 0;
1799 u16 tail;
1800
1801 /* Consume pending rx completions.
1802 * Wait for the flush completion (identified by zero num_rcvd)
1803 * to arrive. Notify CQ even when there are no more CQ entries
1804 * for HW to flush partially coalesced CQ entries.
1805 * In Lancer, there is no need to wait for flush compl.
1806 */
1807 for (;;) {
1808 rxcp = be_rx_compl_get(rxo);
1809 if (rxcp == NULL) {
1810 if (lancer_chip(adapter))
1811 break;
1812
1813 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1814 dev_warn(&adapter->pdev->dev,
1815 "did not receive flush compl\n");
1816 break;
1817 }
1818 be_cq_notify(adapter, rx_cq->id, true, 0);
1819 mdelay(1);
1820 } else {
1821 be_rx_compl_discard(rxo, rxcp);
1822 be_cq_notify(adapter, rx_cq->id, false, 1);
1823 if (rxcp->num_rcvd == 0)
1824 break;
1825 }
1826 }
1827
1828 /* After cleanup, leave the CQ in unarmed state */
1829 be_cq_notify(adapter, rx_cq->id, false, 0);
1830
1831 /* Then free posted rx buffers that were not used */
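	/* The oldest posted buffer sits 'used' entries behind the head */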
1832 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1833 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1834 page_info = get_rx_page_info(rxo, tail);
1835 put_page(page_info->page);
1836 memset(page_info, 0, sizeof(*page_info));
1837 }
1838 BUG_ON(atomic_read(&rxq->used));
1839 rxq->tail = rxq->head = 0;
1840 }
1841
1842 static void be_tx_compl_clean(struct be_adapter *adapter)
1843 {
1844 struct be_tx_obj *txo;
1845 struct be_queue_info *txq;
1846 struct be_eth_tx_compl *txcp;
1847 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1848 struct sk_buff *sent_skb;
1849 bool dummy_wrb;
1850 int i, pending_txqs;
1851
1852 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1853 do {
1854 pending_txqs = adapter->num_tx_qs;
1855
1856 for_all_tx_queues(adapter, txo, i) {
1857 txq = &txo->q;
1858 while ((txcp = be_tx_compl_get(&txo->cq))) {
1859 end_idx =
1860 AMAP_GET_BITS(struct amap_eth_tx_compl,
1861 wrb_index, txcp);
1862 num_wrbs += be_tx_compl_process(adapter, txo,
1863 end_idx);
1864 cmpl++;
1865 }
1866 if (cmpl) {
1867 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1868 atomic_sub(num_wrbs, &txq->used);
1869 cmpl = 0;
1870 num_wrbs = 0;
1871 }
1872 if (atomic_read(&txq->used) == 0)
1873 pending_txqs--;
1874 }
1875
1876 if (pending_txqs == 0 || ++timeo > 200)
1877 break;
1878
1879 mdelay(1);
1880 } while (true);
1881
1882 for_all_tx_queues(adapter, txo, i) {
1883 txq = &txo->q;
1884 if (atomic_read(&txq->used))
1885 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1886 atomic_read(&txq->used));
1887
1888 /* free posted tx for which compls will never arrive */
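		/* Recompute each skb's WRB count to locate its last WRB and
		 * reclaim the WRBs as if a completion had arrived
		 */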
1889 while (atomic_read(&txq->used)) {
1890 sent_skb = txo->sent_skb_list[txq->tail];
1891 end_idx = txq->tail;
1892 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1893 &dummy_wrb);
1894 index_adv(&end_idx, num_wrbs - 1, txq->len);
1895 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1896 atomic_sub(num_wrbs, &txq->used);
1897 }
1898 }
1899 }
1900
1901 static void be_evt_queues_destroy(struct be_adapter *adapter)
1902 {
1903 struct be_eq_obj *eqo;
1904 int i;
1905
1906 for_all_evt_queues(adapter, eqo, i) {
1907 if (eqo->q.created) {
1908 be_eq_clean(eqo);
1909 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1910 }
1911 be_queue_free(adapter, &eqo->q);
1912 }
1913 }
1914
1915 static int be_evt_queues_create(struct be_adapter *adapter)
1916 {
1917 struct be_queue_info *eq;
1918 struct be_eq_obj *eqo;
1919 int i, rc;
1920
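	/* One EQ (with its own NAPI context) is created per interrupt vector */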
1921 adapter->num_evt_qs = num_irqs(adapter);
1922
1923 for_all_evt_queues(adapter, eqo, i) {
1924 eqo->adapter = adapter;
1925 eqo->tx_budget = BE_TX_BUDGET;
1926 eqo->idx = i;
1927 eqo->max_eqd = BE_MAX_EQD;
1928 eqo->enable_aic = true;
1929
1930 eq = &eqo->q;
1931 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1932 sizeof(struct be_eq_entry));
1933 if (rc)
1934 return rc;
1935
1936 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1937 if (rc)
1938 return rc;
1939 }
1940 return 0;
1941 }
1942
1943 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1944 {
1945 struct be_queue_info *q;
1946
1947 q = &adapter->mcc_obj.q;
1948 if (q->created)
1949 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1950 be_queue_free(adapter, q);
1951
1952 q = &adapter->mcc_obj.cq;
1953 if (q->created)
1954 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1955 be_queue_free(adapter, q);
1956 }
1957
1958 /* Must be called only after TX qs are created as MCC shares TX EQ */
1959 static int be_mcc_queues_create(struct be_adapter *adapter)
1960 {
1961 struct be_queue_info *q, *cq;
1962
1963 cq = &adapter->mcc_obj.cq;
1964 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1965 sizeof(struct be_mcc_compl)))
1966 goto err;
1967
1968 /* Use the default EQ for MCC completions */
1969 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1970 goto mcc_cq_free;
1971
1972 q = &adapter->mcc_obj.q;
1973 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1974 goto mcc_cq_destroy;
1975
1976 if (be_cmd_mccq_create(adapter, q, cq))
1977 goto mcc_q_free;
1978
1979 return 0;
1980
1981 mcc_q_free:
1982 be_queue_free(adapter, q);
1983 mcc_cq_destroy:
1984 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1985 mcc_cq_free:
1986 be_queue_free(adapter, cq);
1987 err:
1988 return -1;
1989 }
1990
1991 static void be_tx_queues_destroy(struct be_adapter *adapter)
1992 {
1993 struct be_queue_info *q;
1994 struct be_tx_obj *txo;
1995 u8 i;
1996
1997 for_all_tx_queues(adapter, txo, i) {
1998 q = &txo->q;
1999 if (q->created)
2000 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2001 be_queue_free(adapter, q);
2002
2003 q = &txo->cq;
2004 if (q->created)
2005 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2006 be_queue_free(adapter, q);
2007 }
2008 }
2009
2010 static int be_num_txqs_want(struct be_adapter *adapter)
2011 {
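	/* Use a single TXQ when SR-IOV is desired on a non-Lancer chip,
	 * in multi-channel mode, for non-Lancer VFs and on BE2 chips;
	 * otherwise use the adapter's max_tx_queues.
	 */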
2012 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2013 be_is_mc(adapter) ||
2014 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2015 BE2_chip(adapter))
2016 return 1;
2017 else
2018 return adapter->max_tx_queues;
2019 }
2020
2021 static int be_tx_cqs_create(struct be_adapter *adapter)
2022 {
2023 struct be_queue_info *cq, *eq;
2024 int status;
2025 struct be_tx_obj *txo;
2026 u8 i;
2027
2028 adapter->num_tx_qs = be_num_txqs_want(adapter);
2029 if (adapter->num_tx_qs != MAX_TX_QS) {
2030 rtnl_lock();
2031 netif_set_real_num_tx_queues(adapter->netdev,
2032 adapter->num_tx_qs);
2033 rtnl_unlock();
2034 }
2035
2036 for_all_tx_queues(adapter, txo, i) {
2037 cq = &txo->cq;
2038 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2039 sizeof(struct be_eth_tx_compl));
2040 if (status)
2041 return status;
2042
2043 /* If num_evt_qs is less than num_tx_qs, then more than
2044 		 * one txq shares an eq
2045 */
2046 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2047 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2048 if (status)
2049 return status;
2050 }
2051 return 0;
2052 }
2053
2054 static int be_tx_qs_create(struct be_adapter *adapter)
2055 {
2056 struct be_tx_obj *txo;
2057 int i, status;
2058
2059 for_all_tx_queues(adapter, txo, i) {
2060 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2061 sizeof(struct be_eth_wrb));
2062 if (status)
2063 return status;
2064
2065 status = be_cmd_txq_create(adapter, txo);
2066 if (status)
2067 return status;
2068 }
2069
2070 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2071 adapter->num_tx_qs);
2072 return 0;
2073 }
2074
2075 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2076 {
2077 struct be_queue_info *q;
2078 struct be_rx_obj *rxo;
2079 int i;
2080
2081 for_all_rx_queues(adapter, rxo, i) {
2082 q = &rxo->cq;
2083 if (q->created)
2084 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2085 be_queue_free(adapter, q);
2086 }
2087 }
2088
2089 static int be_rx_cqs_create(struct be_adapter *adapter)
2090 {
2091 struct be_queue_info *eq, *cq;
2092 struct be_rx_obj *rxo;
2093 int rc, i;
2094
2095 /* We'll create as many RSS rings as there are irqs.
2096 * But when there's only one irq there's no use creating RSS rings
2097 */
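	/* With multiple irqs: one RSS ring per irq plus the default RX queue */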
2098 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2099 num_irqs(adapter) + 1 : 1;
2100 if (adapter->num_rx_qs != MAX_RX_QS) {
2101 rtnl_lock();
2102 netif_set_real_num_rx_queues(adapter->netdev,
2103 adapter->num_rx_qs);
2104 rtnl_unlock();
2105 }
2106
2107 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2108 for_all_rx_queues(adapter, rxo, i) {
2109 rxo->adapter = adapter;
2110 cq = &rxo->cq;
2111 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2112 sizeof(struct be_eth_rx_compl));
2113 if (rc)
2114 return rc;
2115
2116 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2117 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2118 if (rc)
2119 return rc;
2120 }
2121
2122 dev_info(&adapter->pdev->dev,
2123 "created %d RSS queue(s) and 1 default RX queue\n",
2124 adapter->num_rx_qs - 1);
2125 return 0;
2126 }
2127
2128 static irqreturn_t be_intx(int irq, void *dev)
2129 {
2130 struct be_eq_obj *eqo = dev;
2131 struct be_adapter *adapter = eqo->adapter;
2132 int num_evts = 0;
2133
2134 /* IRQ is not expected when NAPI is scheduled as the EQ
2135 * will not be armed.
2136 * But, this can happen on Lancer INTx where it takes
2137 	 * a while to de-assert INTx or in BE2 where occasionally
2138 * an interrupt may be raised even when EQ is unarmed.
2139 * If NAPI is already scheduled, then counting & notifying
2140 * events will orphan them.
2141 */
2142 if (napi_schedule_prep(&eqo->napi)) {
2143 num_evts = events_get(eqo);
2144 __napi_schedule(&eqo->napi);
2145 if (num_evts)
2146 eqo->spurious_intr = 0;
2147 }
2148 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2149
2150 	/* Return IRQ_HANDLED only for the first spurious intr
2151 * after a valid intr to stop the kernel from branding
2152 * this irq as a bad one!
2153 */
2154 if (num_evts || eqo->spurious_intr++ == 0)
2155 return IRQ_HANDLED;
2156 else
2157 return IRQ_NONE;
2158 }
2159
2160 static irqreturn_t be_msix(int irq, void *dev)
2161 {
2162 struct be_eq_obj *eqo = dev;
2163
2164 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2165 napi_schedule(&eqo->napi);
2166 return IRQ_HANDLED;
2167 }
2168
2169 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2170 {
2171 	return rxcp->tcpf && !rxcp->err;
2172 }
2173
2174 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2175 int budget)
2176 {
2177 struct be_adapter *adapter = rxo->adapter;
2178 struct be_queue_info *rx_cq = &rxo->cq;
2179 struct be_rx_compl_info *rxcp;
2180 u32 work_done;
2181
2182 for (work_done = 0; work_done < budget; work_done++) {
2183 rxcp = be_rx_compl_get(rxo);
2184 if (!rxcp)
2185 break;
2186
2187 /* Is it a flush compl that has no data */
2188 if (unlikely(rxcp->num_rcvd == 0))
2189 goto loop_continue;
2190
2191 /* Discard compl with partial DMA Lancer B0 */
2192 if (unlikely(!rxcp->pkt_size)) {
2193 be_rx_compl_discard(rxo, rxcp);
2194 goto loop_continue;
2195 }
2196
2197 /* On BE drop pkts that arrive due to imperfect filtering in
2198 		 * promiscuous mode on some SKUs
2199 */
2200 if (unlikely(rxcp->port != adapter->port_num &&
2201 !lancer_chip(adapter))) {
2202 be_rx_compl_discard(rxo, rxcp);
2203 goto loop_continue;
2204 }
2205
2206 if (do_gro(rxcp))
2207 be_rx_compl_process_gro(rxo, napi, rxcp);
2208 else
2209 be_rx_compl_process(rxo, rxcp);
2210 loop_continue:
2211 be_rx_stats_update(rxo, rxcp);
2212 }
2213
2214 if (work_done) {
2215 be_cq_notify(adapter, rx_cq->id, true, work_done);
2216
2217 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2218 be_post_rx_frags(rxo, GFP_ATOMIC);
2219 }
2220
2221 return work_done;
2222 }
2223
2224 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2225 int budget, int idx)
2226 {
2227 struct be_eth_tx_compl *txcp;
2228 int num_wrbs = 0, work_done;
2229
2230 for (work_done = 0; work_done < budget; work_done++) {
2231 txcp = be_tx_compl_get(&txo->cq);
2232 if (!txcp)
2233 break;
2234 num_wrbs += be_tx_compl_process(adapter, txo,
2235 AMAP_GET_BITS(struct amap_eth_tx_compl,
2236 wrb_index, txcp));
2237 }
2238
2239 if (work_done) {
2240 be_cq_notify(adapter, txo->cq.id, true, work_done);
2241 atomic_sub(num_wrbs, &txo->q.used);
2242
2243 /* As Tx wrbs have been freed up, wake up netdev queue
2244 * if it was stopped due to lack of tx wrbs. */
2245 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2246 atomic_read(&txo->q.used) < txo->q.len / 2) {
2247 netif_wake_subqueue(adapter->netdev, idx);
2248 }
2249
2250 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2251 tx_stats(txo)->tx_compl += work_done;
2252 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2253 }
2254 return (work_done < budget); /* Done */
2255 }
2256
2257 int be_poll(struct napi_struct *napi, int budget)
2258 {
2259 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2260 struct be_adapter *adapter = eqo->adapter;
2261 int max_work = 0, work, i, num_evts;
2262 bool tx_done;
2263
2264 num_evts = events_get(eqo);
2265
2266 /* Process all TXQs serviced by this EQ */
2267 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2268 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2269 eqo->tx_budget, i);
2270 if (!tx_done)
2271 max_work = budget;
2272 }
2273
2274 /* This loop will iterate twice for EQ0 in which
2275 	 * completions of the last RXQ (default one) are also processed.
2276 	 * For other EQs the loop iterates only once.
2277 */
2278 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2279 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2280 max_work = max(work, max_work);
2281 }
2282
2283 if (is_mcc_eqo(eqo))
2284 be_process_mcc(adapter);
2285
2286 if (max_work < budget) {
2287 napi_complete(napi);
2288 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2289 } else {
2290 /* As we'll continue in polling mode, count and clear events */
2291 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2292 }
2293 return max_work;
2294 }
2295
2296 void be_detect_error(struct be_adapter *adapter)
2297 {
2298 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2299 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2300 u32 i;
2301
2302 if (be_hw_error(adapter))
2303 return;
2304
2305 if (lancer_chip(adapter)) {
2306 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2307 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2308 sliport_err1 = ioread32(adapter->db +
2309 SLIPORT_ERROR1_OFFSET);
2310 sliport_err2 = ioread32(adapter->db +
2311 SLIPORT_ERROR2_OFFSET);
2312 }
2313 } else {
2314 pci_read_config_dword(adapter->pdev,
2315 PCICFG_UE_STATUS_LOW, &ue_lo);
2316 pci_read_config_dword(adapter->pdev,
2317 PCICFG_UE_STATUS_HIGH, &ue_hi);
2318 pci_read_config_dword(adapter->pdev,
2319 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2320 pci_read_config_dword(adapter->pdev,
2321 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2322
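		/* Bits set in the UE mask registers are not treated as errors */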
2323 ue_lo = (ue_lo & ~ue_lo_mask);
2324 ue_hi = (ue_hi & ~ue_hi_mask);
2325 }
2326
2327 /* On certain platforms BE hardware can indicate spurious UEs.
2328 	 * In case of a real UE, the h/w is allowed to stop working on its own,
2329 	 * so hw_error is not set for UE detection.
2330 */
2331 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2332 adapter->hw_error = true;
2333 dev_err(&adapter->pdev->dev,
2334 "Error detected in the card\n");
2335 }
2336
2337 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2338 dev_err(&adapter->pdev->dev,
2339 "ERR: sliport status 0x%x\n", sliport_status);
2340 dev_err(&adapter->pdev->dev,
2341 "ERR: sliport error1 0x%x\n", sliport_err1);
2342 dev_err(&adapter->pdev->dev,
2343 "ERR: sliport error2 0x%x\n", sliport_err2);
2344 }
2345
2346 if (ue_lo) {
2347 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2348 if (ue_lo & 1)
2349 dev_err(&adapter->pdev->dev,
2350 "UE: %s bit set\n", ue_status_low_desc[i]);
2351 }
2352 }
2353
2354 if (ue_hi) {
2355 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2356 if (ue_hi & 1)
2357 dev_err(&adapter->pdev->dev,
2358 "UE: %s bit set\n", ue_status_hi_desc[i]);
2359 }
2360 }
2361
2362 }
2363
2364 static void be_msix_disable(struct be_adapter *adapter)
2365 {
2366 if (msix_enabled(adapter)) {
2367 pci_disable_msix(adapter->pdev);
2368 adapter->num_msix_vec = 0;
2369 }
2370 }
2371
2372 static uint be_num_rss_want(struct be_adapter *adapter)
2373 {
2374 u32 num = 0;
2375
2376 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2377 (lancer_chip(adapter) ||
2378 (!sriov_want(adapter) && be_physfn(adapter)))) {
2379 num = adapter->max_rss_queues;
2380 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2381 }
2382 return num;
2383 }
2384
2385 static int be_msix_enable(struct be_adapter *adapter)
2386 {
2387 #define BE_MIN_MSIX_VECTORS 1
2388 int i, status, num_vec, num_roce_vec = 0;
2389 struct device *dev = &adapter->pdev->dev;
2390
2391 /* If RSS queues are not used, need a vec for default RX Q */
2392 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2393 if (be_roce_supported(adapter)) {
2394 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2395 (num_online_cpus() + 1));
2396 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2397 num_vec += num_roce_vec;
2398 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2399 }
2400 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2401
2402 for (i = 0; i < num_vec; i++)
2403 adapter->msix_entries[i].entry = i;
2404
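	/* pci_enable_msix() returns 0 on success; a positive return value is
	 * the number of vectors that could have been allocated, so retry
	 * with that smaller count.
	 */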
2405 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2406 if (status == 0) {
2407 goto done;
2408 } else if (status >= BE_MIN_MSIX_VECTORS) {
2409 num_vec = status;
2410 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2411 num_vec);
2412 if (!status)
2413 goto done;
2414 }
2415
2416 dev_warn(dev, "MSIx enable failed\n");
2417 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2418 if (!be_physfn(adapter))
2419 return status;
2420 return 0;
2421 done:
2422 if (be_roce_supported(adapter)) {
2423 if (num_vec > num_roce_vec) {
2424 adapter->num_msix_vec = num_vec - num_roce_vec;
2425 adapter->num_msix_roce_vec =
2426 num_vec - adapter->num_msix_vec;
2427 } else {
2428 adapter->num_msix_vec = num_vec;
2429 adapter->num_msix_roce_vec = 0;
2430 }
2431 } else
2432 adapter->num_msix_vec = num_vec;
2433 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2434 return 0;
2435 }
2436
2437 static inline int be_msix_vec_get(struct be_adapter *adapter,
2438 struct be_eq_obj *eqo)
2439 {
2440 return adapter->msix_entries[eqo->idx].vector;
2441 }
2442
2443 static int be_msix_register(struct be_adapter *adapter)
2444 {
2445 struct net_device *netdev = adapter->netdev;
2446 struct be_eq_obj *eqo;
2447 int status, i, vec;
2448
2449 for_all_evt_queues(adapter, eqo, i) {
2450 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2451 vec = be_msix_vec_get(adapter, eqo);
2452 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2453 if (status)
2454 goto err_msix;
2455 }
2456
2457 return 0;
2458 err_msix:
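	/* Unwind: free the vectors registered so far, in reverse order */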
2459 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2460 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2461 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2462 status);
2463 be_msix_disable(adapter);
2464 return status;
2465 }
2466
2467 static int be_irq_register(struct be_adapter *adapter)
2468 {
2469 struct net_device *netdev = adapter->netdev;
2470 int status;
2471
2472 if (msix_enabled(adapter)) {
2473 status = be_msix_register(adapter);
2474 if (status == 0)
2475 goto done;
2476 /* INTx is not supported for VF */
2477 if (!be_physfn(adapter))
2478 return status;
2479 }
2480
2481 /* INTx: only the first EQ is used */
2482 netdev->irq = adapter->pdev->irq;
2483 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2484 &adapter->eq_obj[0]);
2485 if (status) {
2486 dev_err(&adapter->pdev->dev,
2487 "INTx request IRQ failed - err %d\n", status);
2488 return status;
2489 }
2490 done:
2491 adapter->isr_registered = true;
2492 return 0;
2493 }
2494
2495 static void be_irq_unregister(struct be_adapter *adapter)
2496 {
2497 struct net_device *netdev = adapter->netdev;
2498 struct be_eq_obj *eqo;
2499 int i;
2500
2501 if (!adapter->isr_registered)
2502 return;
2503
2504 /* INTx */
2505 if (!msix_enabled(adapter)) {
2506 free_irq(netdev->irq, &adapter->eq_obj[0]);
2507 goto done;
2508 }
2509
2510 /* MSIx */
2511 for_all_evt_queues(adapter, eqo, i)
2512 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2513
2514 done:
2515 adapter->isr_registered = false;
2516 }
2517
2518 static void be_rx_qs_destroy(struct be_adapter *adapter)
2519 {
2520 struct be_queue_info *q;
2521 struct be_rx_obj *rxo;
2522 int i;
2523
2524 for_all_rx_queues(adapter, rxo, i) {
2525 q = &rxo->q;
2526 if (q->created) {
2527 be_cmd_rxq_destroy(adapter, q);
2528 be_rx_cq_clean(rxo);
2529 }
2530 be_queue_free(adapter, q);
2531 }
2532 }
2533
2534 static int be_close(struct net_device *netdev)
2535 {
2536 struct be_adapter *adapter = netdev_priv(netdev);
2537 struct be_eq_obj *eqo;
2538 int i;
2539
2540 be_roce_dev_close(adapter);
2541
2542 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2543 for_all_evt_queues(adapter, eqo, i)
2544 napi_disable(&eqo->napi);
2545 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2546 }
2547
2548 be_async_mcc_disable(adapter);
2549
2550 /* Wait for all pending tx completions to arrive so that
2551 * all tx skbs are freed.
2552 */
2553 be_tx_compl_clean(adapter);
2554 netif_tx_disable(netdev);
2555
2556 be_rx_qs_destroy(adapter);
2557
2558 for_all_evt_queues(adapter, eqo, i) {
2559 if (msix_enabled(adapter))
2560 synchronize_irq(be_msix_vec_get(adapter, eqo));
2561 else
2562 synchronize_irq(netdev->irq);
2563 be_eq_clean(eqo);
2564 }
2565
2566 be_irq_unregister(adapter);
2567
2568 return 0;
2569 }
2570
2571 static int be_rx_qs_create(struct be_adapter *adapter)
2572 {
2573 struct be_rx_obj *rxo;
2574 int rc, i, j;
2575 u8 rsstable[128];
2576
2577 for_all_rx_queues(adapter, rxo, i) {
2578 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2579 sizeof(struct be_eth_rx_d));
2580 if (rc)
2581 return rc;
2582 }
2583
2584 /* The FW would like the default RXQ to be created first */
2585 rxo = default_rxo(adapter);
2586 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2587 adapter->if_handle, false, &rxo->rss_id);
2588 if (rc)
2589 return rc;
2590
2591 for_all_rss_queues(adapter, rxo, i) {
2592 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2593 rx_frag_size, adapter->if_handle,
2594 true, &rxo->rss_id);
2595 if (rc)
2596 return rc;
2597 }
2598
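	/* For multiple RX rings, stripe the RSS ring ids across the 128-entry
	 * indirection table and enable RSS
	 */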
2599 if (be_multi_rxq(adapter)) {
2600 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2601 for_all_rss_queues(adapter, rxo, i) {
2602 if ((j + i) >= 128)
2603 break;
2604 rsstable[j + i] = rxo->rss_id;
2605 }
2606 }
2607 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2608 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2609
2610 if (!BEx_chip(adapter))
2611 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2612 RSS_ENABLE_UDP_IPV6;
2613
2614 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2615 128);
2616 if (rc) {
2617 adapter->rss_flags = 0;
2618 return rc;
2619 }
2620 }
2621
2622 /* First time posting */
2623 for_all_rx_queues(adapter, rxo, i)
2624 be_post_rx_frags(rxo, GFP_KERNEL);
2625 return 0;
2626 }
2627
2628 static int be_open(struct net_device *netdev)
2629 {
2630 struct be_adapter *adapter = netdev_priv(netdev);
2631 struct be_eq_obj *eqo;
2632 struct be_rx_obj *rxo;
2633 struct be_tx_obj *txo;
2634 u8 link_status;
2635 int status, i;
2636
2637 status = be_rx_qs_create(adapter);
2638 if (status)
2639 goto err;
2640
2641 status = be_irq_register(adapter);
2642 if (status)
2643 goto err;
2644
2645 for_all_rx_queues(adapter, rxo, i)
2646 be_cq_notify(adapter, rxo->cq.id, true, 0);
2647
2648 for_all_tx_queues(adapter, txo, i)
2649 be_cq_notify(adapter, txo->cq.id, true, 0);
2650
2651 be_async_mcc_enable(adapter);
2652
2653 for_all_evt_queues(adapter, eqo, i) {
2654 napi_enable(&eqo->napi);
2655 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2656 }
2657 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2658
2659 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2660 if (!status)
2661 be_link_status_update(adapter, link_status);
2662
2663 netif_tx_start_all_queues(netdev);
2664 be_roce_dev_open(adapter);
2665 return 0;
2666 err:
2667 be_close(adapter->netdev);
2668 return -EIO;
2669 }
2670
2671 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2672 {
2673 struct be_dma_mem cmd;
2674 int status = 0;
2675 u8 mac[ETH_ALEN];
2676
2677 memset(mac, 0, ETH_ALEN);
2678
2679 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2680 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2681 GFP_KERNEL | __GFP_ZERO);
2682 if (cmd.va == NULL)
2683 return -1;
2684
2685 if (enable) {
2686 status = pci_write_config_dword(adapter->pdev,
2687 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2688 if (status) {
2689 dev_err(&adapter->pdev->dev,
2690 "Could not enable Wake-on-lan\n");
2691 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2692 cmd.dma);
2693 return status;
2694 }
2695 status = be_cmd_enable_magic_wol(adapter,
2696 adapter->netdev->dev_addr, &cmd);
2697 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2698 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2699 } else {
2700 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2701 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2702 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2703 }
2704
2705 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2706 return status;
2707 }
2708
2709 /*
2710 * Generate a seed MAC address from the PF MAC Address using jhash.
2711  * MAC addresses for VFs are assigned incrementally starting from the seed.
2712 * These addresses are programmed in the ASIC by the PF and the VF driver
2713 * queries for the MAC address during its probe.
2714 */
2715 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2716 {
2717 u32 vf;
2718 int status = 0;
2719 u8 mac[ETH_ALEN];
2720 struct be_vf_cfg *vf_cfg;
2721
2722 be_vf_eth_addr_generate(adapter, mac);
2723
2724 for_all_vfs(adapter, vf_cfg, vf) {
2725 if (lancer_chip(adapter)) {
2726 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2727 } else {
2728 status = be_cmd_pmac_add(adapter, mac,
2729 vf_cfg->if_handle,
2730 &vf_cfg->pmac_id, vf + 1);
2731 }
2732
2733 if (status)
2734 dev_err(&adapter->pdev->dev,
2735 "Mac address assignment failed for VF %d\n", vf);
2736 else
2737 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2738
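		/* The next VF gets the next consecutive MAC address */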
2739 mac[5] += 1;
2740 }
2741 return status;
2742 }
2743
2744 static int be_vfs_mac_query(struct be_adapter *adapter)
2745 {
2746 int status, vf;
2747 u8 mac[ETH_ALEN];
2748 struct be_vf_cfg *vf_cfg;
2749 bool active;
2750
2751 for_all_vfs(adapter, vf_cfg, vf) {
2752 be_cmd_get_mac_from_list(adapter, mac, &active,
2753 &vf_cfg->pmac_id, 0);
2754
2755 status = be_cmd_mac_addr_query(adapter, mac, false,
2756 vf_cfg->if_handle, 0);
2757 if (status)
2758 return status;
2759 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2760 }
2761 return 0;
2762 }
2763
2764 static void be_vf_clear(struct be_adapter *adapter)
2765 {
2766 struct be_vf_cfg *vf_cfg;
2767 u32 vf;
2768
2769 if (be_find_vfs(adapter, ASSIGNED)) {
2770 dev_warn(&adapter->pdev->dev,
2771 "VFs are assigned to VMs: not disabling VFs\n");
2772 goto done;
2773 }
2774
2775 pci_disable_sriov(adapter->pdev);
2776
2777 for_all_vfs(adapter, vf_cfg, vf) {
2778 if (lancer_chip(adapter))
2779 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2780 else
2781 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2782 vf_cfg->pmac_id, vf + 1);
2783
2784 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2785 }
2786 done:
2787 kfree(adapter->vf_cfg);
2788 adapter->num_vfs = 0;
2789 }
2790
2791 static int be_clear(struct be_adapter *adapter)
2792 {
2793 int i = 1;
2794
2795 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2796 cancel_delayed_work_sync(&adapter->work);
2797 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2798 }
2799
2800 if (sriov_enabled(adapter))
2801 be_vf_clear(adapter);
2802
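	/* pmac_id[0] holds the primary MAC; extra unicast MACs start at index 1 */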
2803 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2804 be_cmd_pmac_del(adapter, adapter->if_handle,
2805 adapter->pmac_id[i], 0);
2806
2807 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2808
2809 be_mcc_queues_destroy(adapter);
2810 be_rx_cqs_destroy(adapter);
2811 be_tx_queues_destroy(adapter);
2812 be_evt_queues_destroy(adapter);
2813
2814 kfree(adapter->pmac_id);
2815 adapter->pmac_id = NULL;
2816
2817 be_msix_disable(adapter);
2818 return 0;
2819 }
2820
2821 static int be_vfs_if_create(struct be_adapter *adapter)
2822 {
2823 struct be_vf_cfg *vf_cfg;
2824 u32 cap_flags, en_flags, vf;
2825 int status;
2826
2827 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2828 BE_IF_FLAGS_MULTICAST;
2829
2830 for_all_vfs(adapter, vf_cfg, vf) {
2831 if (!BE3_chip(adapter))
2832 be_cmd_get_profile_config(adapter, &cap_flags,
2833 NULL, vf + 1);
2834
2835 /* If a FW profile exists, then cap_flags are updated */
2836 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2837 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2838 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2839 &vf_cfg->if_handle, vf + 1);
2840 if (status)
2841 goto err;
2842 }
2843 err:
2844 return status;
2845 }
2846
2847 static int be_vf_setup_init(struct be_adapter *adapter)
2848 {
2849 struct be_vf_cfg *vf_cfg;
2850 int vf;
2851
2852 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2853 GFP_KERNEL);
2854 if (!adapter->vf_cfg)
2855 return -ENOMEM;
2856
2857 for_all_vfs(adapter, vf_cfg, vf) {
2858 vf_cfg->if_handle = -1;
2859 vf_cfg->pmac_id = -1;
2860 }
2861 return 0;
2862 }
2863
2864 static int be_vf_setup(struct be_adapter *adapter)
2865 {
2866 struct be_vf_cfg *vf_cfg;
2867 u16 def_vlan, lnk_speed;
2868 int status, old_vfs, vf;
2869 struct device *dev = &adapter->pdev->dev;
2870
2871 old_vfs = be_find_vfs(adapter, ENABLED);
2872 if (old_vfs) {
2873 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2874 if (old_vfs != num_vfs)
2875 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2876 adapter->num_vfs = old_vfs;
2877 } else {
2878 if (num_vfs > adapter->dev_num_vfs)
2879 dev_info(dev, "Device supports %d VFs and not %d\n",
2880 adapter->dev_num_vfs, num_vfs);
2881 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2882 if (!adapter->num_vfs)
2883 return 0;
2884 }
2885
2886 status = be_vf_setup_init(adapter);
2887 if (status)
2888 goto err;
2889
2890 if (old_vfs) {
2891 for_all_vfs(adapter, vf_cfg, vf) {
2892 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2893 if (status)
2894 goto err;
2895 }
2896 } else {
2897 status = be_vfs_if_create(adapter);
2898 if (status)
2899 goto err;
2900 }
2901
2902 if (old_vfs) {
2903 status = be_vfs_mac_query(adapter);
2904 if (status)
2905 goto err;
2906 } else {
2907 status = be_vf_eth_addr_config(adapter);
2908 if (status)
2909 goto err;
2910 }
2911
2912 for_all_vfs(adapter, vf_cfg, vf) {
2913 		/* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2914 * Allow full available bandwidth
2915 */
2916 if (BE3_chip(adapter) && !old_vfs)
2917 be_cmd_set_qos(adapter, 1000, vf+1);
2918
2919 status = be_cmd_link_status_query(adapter, &lnk_speed,
2920 NULL, vf + 1);
2921 if (!status)
2922 vf_cfg->tx_rate = lnk_speed;
2923
2924 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2925 vf + 1, vf_cfg->if_handle);
2926 if (status)
2927 goto err;
2928 vf_cfg->def_vid = def_vlan;
2929
2930 be_cmd_enable_vf(adapter, vf + 1);
2931 }
2932
2933 if (!old_vfs) {
2934 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2935 if (status) {
2936 dev_err(dev, "SRIOV enable failed\n");
2937 adapter->num_vfs = 0;
2938 goto err;
2939 }
2940 }
2941 return 0;
2942 err:
2943 dev_err(dev, "VF setup failed\n");
2944 be_vf_clear(adapter);
2945 return status;
2946 }
2947
2948 static void be_setup_init(struct be_adapter *adapter)
2949 {
2950 adapter->vlan_prio_bmap = 0xff;
2951 adapter->phy.link_speed = -1;
2952 adapter->if_handle = -1;
2953 adapter->be3_native = false;
2954 adapter->promiscuous = false;
2955 if (be_physfn(adapter))
2956 adapter->cmd_privileges = MAX_PRIVILEGES;
2957 else
2958 adapter->cmd_privileges = MIN_PRIVILEGES;
2959 }
2960
2961 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2962 bool *active_mac, u32 *pmac_id)
2963 {
2964 int status = 0;
2965
2966 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2967 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2968 if (!lancer_chip(adapter) && !be_physfn(adapter))
2969 *active_mac = true;
2970 else
2971 *active_mac = false;
2972
2973 return status;
2974 }
2975
2976 if (lancer_chip(adapter)) {
2977 status = be_cmd_get_mac_from_list(adapter, mac,
2978 active_mac, pmac_id, 0);
2979 if (*active_mac) {
2980 status = be_cmd_mac_addr_query(adapter, mac, false,
2981 if_handle, *pmac_id);
2982 }
2983 } else if (be_physfn(adapter)) {
2984 /* For BE3, for PF get permanent MAC */
2985 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2986 *active_mac = false;
2987 } else {
2988 		/* For BE3, for VF get soft MAC assigned by PF */
2989 status = be_cmd_mac_addr_query(adapter, mac, false,
2990 if_handle, 0);
2991 *active_mac = true;
2992 }
2993 return status;
2994 }
2995
2996 static void be_get_resources(struct be_adapter *adapter)
2997 {
2998 u16 dev_num_vfs;
2999 int pos, status;
3000 bool profile_present = false;
3001 u16 txq_count = 0;
3002
3003 if (!BEx_chip(adapter)) {
3004 status = be_cmd_get_func_config(adapter);
3005 if (!status)
3006 profile_present = true;
3007 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3008 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3009 }
3010
3011 if (profile_present) {
3012 /* Sanity fixes for Lancer */
3013 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3014 BE_UC_PMAC_COUNT);
3015 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3016 BE_NUM_VLANS_SUPPORTED);
3017 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3018 BE_MAX_MC);
3019 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3020 MAX_TX_QS);
3021 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3022 BE3_MAX_RSS_QS);
3023 adapter->max_event_queues = min_t(u16,
3024 adapter->max_event_queues,
3025 BE3_MAX_RSS_QS);
3026
3027 if (adapter->max_rss_queues &&
3028 adapter->max_rss_queues == adapter->max_rx_queues)
3029 adapter->max_rss_queues -= 1;
3030
3031 if (adapter->max_event_queues < adapter->max_rss_queues)
3032 adapter->max_rss_queues = adapter->max_event_queues;
3033
3034 } else {
3035 if (be_physfn(adapter))
3036 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3037 else
3038 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3039
3040 if (adapter->function_mode & FLEX10_MODE)
3041 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3042 else
3043 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3044
3045 adapter->max_mcast_mac = BE_MAX_MC;
3046 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3047 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3048 MAX_TX_QS);
3049 adapter->max_rss_queues = (adapter->be3_native) ?
3050 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3051 adapter->max_event_queues = BE3_MAX_RSS_QS;
3052
3053 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3054 BE_IF_FLAGS_BROADCAST |
3055 BE_IF_FLAGS_MULTICAST |
3056 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3057 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3058 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3059 BE_IF_FLAGS_PROMISCUOUS;
3060
3061 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3062 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3063 }
3064
3065 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3066 if (pos) {
3067 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3068 &dev_num_vfs);
3069 if (BE3_chip(adapter))
3070 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3071 adapter->dev_num_vfs = dev_num_vfs;
3072 }
3073 }
3074
3075 /* Routine to query per function resource limits */
3076 static int be_get_config(struct be_adapter *adapter)
3077 {
3078 int status;
3079
3080 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3081 &adapter->function_mode,
3082 &adapter->function_caps,
3083 &adapter->asic_rev);
3084 if (status)
3085 goto err;
3086
3087 be_get_resources(adapter);
3088
3089 /* primary mac needs 1 pmac entry */
3090 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3091 sizeof(u32), GFP_KERNEL);
3092 if (!adapter->pmac_id) {
3093 status = -ENOMEM;
3094 goto err;
3095 }
3096
3097 err:
3098 return status;
3099 }
3100
3101 static int be_setup(struct be_adapter *adapter)
3102 {
3103 struct device *dev = &adapter->pdev->dev;
3104 u32 en_flags;
3105 u32 tx_fc, rx_fc;
3106 int status;
3107 u8 mac[ETH_ALEN];
3108 bool active_mac;
3109
3110 be_setup_init(adapter);
3111
3112 if (!lancer_chip(adapter))
3113 be_cmd_req_native_mode(adapter);
3114
3115 status = be_get_config(adapter);
3116 if (status)
3117 goto err;
3118
3119 status = be_msix_enable(adapter);
3120 if (status)
3121 goto err;
3122
3123 status = be_evt_queues_create(adapter);
3124 if (status)
3125 goto err;
3126
3127 status = be_tx_cqs_create(adapter);
3128 if (status)
3129 goto err;
3130
3131 status = be_rx_cqs_create(adapter);
3132 if (status)
3133 goto err;
3134
3135 status = be_mcc_queues_create(adapter);
3136 if (status)
3137 goto err;
3138
3139 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3140 	/* In UMC mode the FW does not return the right privileges.
3141 	 * Override with a privilege level equivalent to the PF's.
3142 */
3143 if (be_is_mc(adapter))
3144 adapter->cmd_privileges = MAX_PRIVILEGES;
3145
3146 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3147 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3148
3149 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3150 en_flags |= BE_IF_FLAGS_RSS;
3151
3152 en_flags = en_flags & adapter->if_cap_flags;
3153
3154 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3155 &adapter->if_handle, 0);
3156 if (status != 0)
3157 goto err;
3158
3159 memset(mac, 0, ETH_ALEN);
3160 active_mac = false;
3161 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3162 &active_mac, &adapter->pmac_id[0]);
3163 if (status != 0)
3164 goto err;
3165
3166 if (!active_mac) {
3167 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3168 &adapter->pmac_id[0], 0);
3169 if (status != 0)
3170 goto err;
3171 }
3172
3173 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3174 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3175 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3176 }
3177
3178 status = be_tx_qs_create(adapter);
3179 if (status)
3180 goto err;
3181
3182 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3183
3184 if (adapter->vlans_added)
3185 be_vid_config(adapter);
3186
3187 be_set_rx_mode(adapter->netdev);
3188
3189 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3190
3191 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3192 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3193 adapter->rx_fc);
3194
3195 if (be_physfn(adapter)) {
3196 if (adapter->dev_num_vfs)
3197 be_vf_setup(adapter);
3198 else
3199 dev_warn(dev, "device doesn't support SRIOV\n");
3200 }
3201
3202 status = be_cmd_get_phy_info(adapter);
3203 if (!status && be_pause_supported(adapter))
3204 adapter->phy.fc_autoneg = 1;
3205
3206 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3207 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3208 return 0;
3209 err:
3210 be_clear(adapter);
3211 return status;
3212 }
3213
3214 #ifdef CONFIG_NET_POLL_CONTROLLER
3215 static void be_netpoll(struct net_device *netdev)
3216 {
3217 struct be_adapter *adapter = netdev_priv(netdev);
3218 struct be_eq_obj *eqo;
3219 int i;
3220
3221 for_all_evt_queues(adapter, eqo, i) {
3222 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3223 napi_schedule(&eqo->napi);
3224 }
3227 }
3228 #endif
3229
3230 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3231 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3232
3233 static bool be_flash_redboot(struct be_adapter *adapter,
3234 const u8 *p, u32 img_start, int image_size,
3235 int hdr_size)
3236 {
3237 u32 crc_offset;
3238 u8 flashed_crc[4];
3239 int status;
3240
3241 crc_offset = hdr_size + img_start + image_size - 4;
3242
3243 p += crc_offset;
3244
3245 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3246 (image_size - 4));
3247 if (status) {
3248 dev_err(&adapter->pdev->dev,
3249 "could not get crc from flash, not flashing redboot\n");
3250 return false;
3251 }
3252
3253 	/* Update redboot only if the CRC does not match */
3254 if (!memcmp(flashed_crc, p, 4))
3255 return false;
3256 else
3257 return true;
3258 }
3259
3260 static bool phy_flashing_required(struct be_adapter *adapter)
3261 {
3262 return (adapter->phy.phy_type == TN_8022 &&
3263 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3264 }
3265
3266 static bool is_comp_in_ufi(struct be_adapter *adapter,
3267 struct flash_section_info *fsec, int type)
3268 {
3269 int i = 0, img_type = 0;
3270 struct flash_section_info_g2 *fsec_g2 = NULL;
3271
3272 if (BE2_chip(adapter))
3273 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3274
3275 for (i = 0; i < MAX_FLASH_COMP; i++) {
3276 if (fsec_g2)
3277 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3278 else
3279 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3280
3281 if (img_type == type)
3282 return true;
3283 }
3284 return false;
3285
3286 }
3287
3288 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3289 int header_size,
3290 const struct firmware *fw)
3291 {
3292 struct flash_section_info *fsec = NULL;
3293 const u8 *p = fw->data;
3294
3295 p += header_size;
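	/* Scan forward in 32-byte steps looking for the flash-section cookie */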
3296 while (p < (fw->data + fw->size)) {
3297 fsec = (struct flash_section_info *)p;
3298 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3299 return fsec;
3300 p += 32;
3301 }
3302 return NULL;
3303 }
3304
3305 static int be_flash(struct be_adapter *adapter, const u8 *img,
3306 struct be_dma_mem *flash_cmd, int optype, int img_size)
3307 {
3308 u32 total_bytes = 0, flash_op, num_bytes = 0;
3309 int status = 0;
3310 struct be_cmd_write_flashrom *req = flash_cmd->va;
3311
3312 total_bytes = img_size;
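	/* Flash in 32KB chunks: intermediate chunks use a SAVE operation and
	 * the final chunk a FLASH operation to commit the image (PHY variants
	 * are used for PHY FW).
	 */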
3313 while (total_bytes) {
3314 num_bytes = min_t(u32, 32*1024, total_bytes);
3315
3316 total_bytes -= num_bytes;
3317
3318 if (!total_bytes) {
3319 if (optype == OPTYPE_PHY_FW)
3320 flash_op = FLASHROM_OPER_PHY_FLASH;
3321 else
3322 flash_op = FLASHROM_OPER_FLASH;
3323 } else {
3324 if (optype == OPTYPE_PHY_FW)
3325 flash_op = FLASHROM_OPER_PHY_SAVE;
3326 else
3327 flash_op = FLASHROM_OPER_SAVE;
3328 }
3329
3330 memcpy(req->data_buf, img, num_bytes);
3331 img += num_bytes;
3332 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3333 flash_op, num_bytes);
3334 if (status) {
3335 if (status == ILLEGAL_IOCTL_REQ &&
3336 optype == OPTYPE_PHY_FW)
3337 break;
3338 dev_err(&adapter->pdev->dev,
3339 "cmd to write to flash rom failed.\n");
3340 return status;
3341 }
3342 }
3343 return 0;
3344 }
3345
3346 /* For BE2, BE3 and BE3-R */
3347 static int be_flash_BEx(struct be_adapter *adapter,
3348 const struct firmware *fw,
3349 struct be_dma_mem *flash_cmd,
3350 int num_of_images)
3351
3352 {
3353 int status = 0, i, filehdr_size = 0;
3354 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3355 const u8 *p = fw->data;
3356 const struct flash_comp *pflashcomp;
3357 int num_comp, redboot;
3358 struct flash_section_info *fsec = NULL;
3359
3360 struct flash_comp gen3_flash_types[] = {
3361 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3362 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3363 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3364 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3365 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3366 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3367 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3368 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3369 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3370 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3371 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3372 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3373 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3374 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3375 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3376 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3377 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3378 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3379 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3380 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3381 };
3382
3383 struct flash_comp gen2_flash_types[] = {
3384 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3385 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3386 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3387 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3388 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3389 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3390 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3391 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3392 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3393 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3394 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3395 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3396 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3397 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3398 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3399 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3400 };
3401
3402 if (BE3_chip(adapter)) {
3403 pflashcomp = gen3_flash_types;
3404 filehdr_size = sizeof(struct flash_file_hdr_g3);
3405 num_comp = ARRAY_SIZE(gen3_flash_types);
3406 } else {
3407 pflashcomp = gen2_flash_types;
3408 filehdr_size = sizeof(struct flash_file_hdr_g2);
3409 num_comp = ARRAY_SIZE(gen2_flash_types);
3410 }
3411
3412 	/* Get flash section info */
3413 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3414 if (!fsec) {
3415 dev_err(&adapter->pdev->dev,
3416 "Invalid Cookie. UFI corrupted ?\n");
3417 return -1;
3418 }
3419 for (i = 0; i < num_comp; i++) {
3420 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3421 continue;
3422
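		/* Skip the NCSI image when the FW running on the adapter is
		 * older than 3.102.148.0
		 */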
3423 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3424 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3425 continue;
3426
3427 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3428 !phy_flashing_required(adapter))
3429 continue;
3430
3431 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3432 redboot = be_flash_redboot(adapter, fw->data,
3433 pflashcomp[i].offset, pflashcomp[i].size,
3434 filehdr_size + img_hdrs_size);
3435 if (!redboot)
3436 continue;
3437 }
3438
3439 p = fw->data;
3440 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3441 if (p + pflashcomp[i].size > fw->data + fw->size)
3442 return -1;
3443
3444 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3445 pflashcomp[i].size);
3446 if (status) {
3447 dev_err(&adapter->pdev->dev,
3448 "Flashing section type %d failed.\n",
3449 pflashcomp[i].img_type);
3450 return status;
3451 }
3452 }
3453 return 0;
3454 }
3455
3456 static int be_flash_skyhawk(struct be_adapter *adapter,
3457 const struct firmware *fw,
3458 struct be_dma_mem *flash_cmd, int num_of_images)
3459 {
3460 int status = 0, i, filehdr_size = 0;
3461 int img_offset, img_size, img_optype, redboot;
3462 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3463 const u8 *p = fw->data;
3464 struct flash_section_info *fsec = NULL;
3465
3466 filehdr_size = sizeof(struct flash_file_hdr_g3);
3467 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3468 if (!fsec) {
3469 dev_err(&adapter->pdev->dev,
3470 "Invalid Cookie. UFI corrupted ?\n");
3471 return -1;
3472 }
3473
3474 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3475 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3476 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3477
3478 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3479 case IMAGE_FIRMWARE_iSCSI:
3480 img_optype = OPTYPE_ISCSI_ACTIVE;
3481 break;
3482 case IMAGE_BOOT_CODE:
3483 img_optype = OPTYPE_REDBOOT;
3484 break;
3485 case IMAGE_OPTION_ROM_ISCSI:
3486 img_optype = OPTYPE_BIOS;
3487 break;
3488 case IMAGE_OPTION_ROM_PXE:
3489 img_optype = OPTYPE_PXE_BIOS;
3490 break;
3491 case IMAGE_OPTION_ROM_FCoE:
3492 img_optype = OPTYPE_FCOE_BIOS;
3493 break;
3494 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3495 img_optype = OPTYPE_ISCSI_BACKUP;
3496 break;
3497 case IMAGE_NCSI:
3498 img_optype = OPTYPE_NCSI_FW;
3499 break;
3500 default:
3501 continue;
3502 }
3503
3504 if (img_optype == OPTYPE_REDBOOT) {
3505 redboot = be_flash_redboot(adapter, fw->data,
3506 img_offset, img_size,
3507 filehdr_size + img_hdrs_size);
3508 if (!redboot)
3509 continue;
3510 }
3511
3512 p = fw->data;
3513 p += filehdr_size + img_offset + img_hdrs_size;
3514 if (p + img_size > fw->data + fw->size)
3515 return -1;
3516
3517 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3518 if (status) {
3519 dev_err(&adapter->pdev->dev,
3520 "Flashing section type %d failed.\n",
3521 fsec->fsec_entry[i].type);
3522 return status;
3523 }
3524 }
3525 return 0;
3526 }
3527
3528 static int lancer_wait_idle(struct be_adapter *adapter)
3529 {
3530 #define SLIPORT_IDLE_TIMEOUT 30
3531 u32 reg_val;
3532 int status = 0, i;
3533
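	/* Poll once per second, up to SLIPORT_IDLE_TIMEOUT seconds, for the
	 * in-progress (INP) bit to clear
	 */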
3534 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3535 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3536 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3537 break;
3538
3539 ssleep(1);
3540 }
3541
3542 if (i == SLIPORT_IDLE_TIMEOUT)
3543 status = -1;
3544
3545 return status;
3546 }
3547
3548 static int lancer_fw_reset(struct be_adapter *adapter)
3549 {
3550 int status = 0;
3551
3552 status = lancer_wait_idle(adapter);
3553 if (status)
3554 return status;
3555
3556 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3557 PHYSDEV_CONTROL_OFFSET);
3558
3559 return status;
3560 }
3561
3562 static int lancer_fw_download(struct be_adapter *adapter,
3563 const struct firmware *fw)
3564 {
3565 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3566 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3567 struct be_dma_mem flash_cmd;
3568 const u8 *data_ptr = NULL;
3569 u8 *dest_image_ptr = NULL;
3570 size_t image_size = 0;
3571 u32 chunk_size = 0;
3572 u32 data_written = 0;
3573 u32 offset = 0;
3574 int status = 0;
3575 u8 add_status = 0;
3576 u8 change_status;
3577
3578 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3579 dev_err(&adapter->pdev->dev,
3580 "FW Image not properly aligned. "
3581 "Length must be 4 byte aligned.\n");
3582 status = -EINVAL;
3583 goto lancer_fw_exit;
3584 }
3585
3586 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3587 + LANCER_FW_DOWNLOAD_CHUNK;
3588 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3589 &flash_cmd.dma, GFP_KERNEL);
3590 if (!flash_cmd.va) {
3591 status = -ENOMEM;
3592 goto lancer_fw_exit;
3593 }
3594
3595 dest_image_ptr = flash_cmd.va +
3596 sizeof(struct lancer_cmd_req_write_object);
3597 image_size = fw->size;
3598 data_ptr = fw->data;
3599
3600 while (image_size) {
3601 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3602
3603 /* Copy the image chunk content. */
3604 memcpy(dest_image_ptr, data_ptr, chunk_size);
3605
3606 status = lancer_cmd_write_object(adapter, &flash_cmd,
3607 chunk_size, offset,
3608 LANCER_FW_DOWNLOAD_LOCATION,
3609 &data_written, &change_status,
3610 &add_status);
3611 if (status)
3612 break;
3613
3614 offset += data_written;
3615 data_ptr += data_written;
3616 image_size -= data_written;
3617 }
3618
3619 if (!status) {
3620 /* Commit the FW written */
3621 status = lancer_cmd_write_object(adapter, &flash_cmd,
3622 0, offset,
3623 LANCER_FW_DOWNLOAD_LOCATION,
3624 &data_written, &change_status,
3625 &add_status);
3626 }
3627
3628 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3629 flash_cmd.dma);
3630 if (status) {
3631 dev_err(&adapter->pdev->dev,
3632 "Firmware load error. "
3633 "Status code: 0x%x Additional Status: 0x%x\n",
3634 status, add_status);
3635 goto lancer_fw_exit;
3636 }
3637
3638 if (change_status == LANCER_FW_RESET_NEEDED) {
3639 status = lancer_fw_reset(adapter);
3640 if (status) {
3641 dev_err(&adapter->pdev->dev,
3642 "Adapter busy for FW reset.\n"
3643 "New FW will not be active.\n");
3644 goto lancer_fw_exit;
3645 }
3646 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3647 dev_err(&adapter->pdev->dev,
3648 "System reboot required for new FW"
3649 " to be active\n");
3650 }
3651
3652 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3653 lancer_fw_exit:
3654 return status;
3655 }
3656
3657 #define UFI_TYPE2 2
3658 #define UFI_TYPE3 3
3659 #define UFI_TYPE3R 10
3660 #define UFI_TYPE4 4
3661 static int be_get_ufi_type(struct be_adapter *adapter,
3662 struct flash_file_hdr_g3 *fhdr)
3663 {
3664 if (fhdr == NULL)
3665 goto be_get_ufi_exit;
3666
3667 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3668 return UFI_TYPE4;
3669 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3670 if (fhdr->asic_type_rev == 0x10)
3671 return UFI_TYPE3R;
3672 else
3673 return UFI_TYPE3;
3674 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3675 return UFI_TYPE2;
3676
3677 be_get_ufi_exit:
3678 dev_err(&adapter->pdev->dev,
3679 "UFI and Interface are not compatible for flashing\n");
3680 return -1;
3681 }
3682
3683 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3684 {
3685 struct flash_file_hdr_g3 *fhdr3;
3686 struct image_hdr *img_hdr_ptr = NULL;
3687 struct be_dma_mem flash_cmd;
3688 const u8 *p;
3689 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3690
3691 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3692 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3693 &flash_cmd.dma, GFP_KERNEL);
3694 if (!flash_cmd.va) {
3695 status = -ENOMEM;
3696 goto be_fw_exit;
3697 }
3698
3699 p = fw->data;
3700 fhdr3 = (struct flash_file_hdr_g3 *)p;
3701
3702 ufi_type = be_get_ufi_type(adapter, fhdr3);
3703
3704 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3705 for (i = 0; i < num_imgs; i++) {
3706 img_hdr_ptr = (struct image_hdr *)(fw->data +
3707 (sizeof(struct flash_file_hdr_g3) +
3708 i * sizeof(struct image_hdr)));
3709 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3710 switch (ufi_type) {
3711 case UFI_TYPE4:
3712 status = be_flash_skyhawk(adapter, fw,
3713 &flash_cmd, num_imgs);
3714 break;
3715 case UFI_TYPE3R:
3716 status = be_flash_BEx(adapter, fw, &flash_cmd,
3717 num_imgs);
3718 break;
3719 case UFI_TYPE3:
3720 /* Do not flash this ufi on BE3-R cards */
3721 if (adapter->asic_rev < 0x10)
3722 status = be_flash_BEx(adapter, fw,
3723 &flash_cmd,
3724 num_imgs);
3725 else {
3726 status = -1;
3727 dev_err(&adapter->pdev->dev,
3728 "Can't load BE3 UFI on BE3R\n");
3729 }
3730 }
3731 }
3732 }
3733
3734 if (ufi_type == UFI_TYPE2)
3735 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3736 else if (ufi_type == -1)
3737 status = -1;
3738
3739 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3740 flash_cmd.dma);
3741 if (status) {
3742 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3743 goto be_fw_exit;
3744 }
3745
3746 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3747
3748 be_fw_exit:
3749 return status;
3750 }
3751
3752 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3753 {
3754 const struct firmware *fw;
3755 int status;
3756
3757 if (!netif_running(adapter->netdev)) {
3758 dev_err(&adapter->pdev->dev,
3759 "Firmware load not allowed (interface is down)\n");
3760 return -1;
3761 }
3762
3763 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3764 if (status)
3765 goto fw_exit;
3766
3767 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3768
3769 if (lancer_chip(adapter))
3770 status = lancer_fw_download(adapter, fw);
3771 else
3772 status = be_fw_download(adapter, fw);
3773
3774 fw_exit:
3775 release_firmware(fw);
3776 return status;
3777 }
3778
3779 static const struct net_device_ops be_netdev_ops = {
3780 .ndo_open = be_open,
3781 .ndo_stop = be_close,
3782 .ndo_start_xmit = be_xmit,
3783 .ndo_set_rx_mode = be_set_rx_mode,
3784 .ndo_set_mac_address = be_mac_addr_set,
3785 .ndo_change_mtu = be_change_mtu,
3786 .ndo_get_stats64 = be_get_stats64,
3787 .ndo_validate_addr = eth_validate_addr,
3788 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3789 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3790 .ndo_set_vf_mac = be_set_vf_mac,
3791 .ndo_set_vf_vlan = be_set_vf_vlan,
3792 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3793 .ndo_get_vf_config = be_get_vf_config,
3794 #ifdef CONFIG_NET_POLL_CONTROLLER
3795 .ndo_poll_controller = be_netpoll,
3796 #endif
3797 };
3798
3799 static void be_netdev_init(struct net_device *netdev)
3800 {
3801 struct be_adapter *adapter = netdev_priv(netdev);
3802 struct be_eq_obj *eqo;
3803 int i;
3804
3805 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3806 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3807 NETIF_F_HW_VLAN_CTAG_TX;
3808 if (be_multi_rxq(adapter))
3809 netdev->hw_features |= NETIF_F_RXHASH;
3810
3811 netdev->features |= netdev->hw_features |
3812 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3813
3814 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3815 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3816
3817 netdev->priv_flags |= IFF_UNICAST_FLT;
3818
3819 netdev->flags |= IFF_MULTICAST;
3820
3821 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3822
3823 netdev->netdev_ops = &be_netdev_ops;
3824
3825 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3826
3827 for_all_evt_queues(adapter, eqo, i)
3828 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3829 }
3830
3831 static void be_unmap_pci_bars(struct be_adapter *adapter)
3832 {
3833 if (adapter->csr)
3834 pci_iounmap(adapter->pdev, adapter->csr);
3835 if (adapter->db)
3836 pci_iounmap(adapter->pdev, adapter->db);
3837 }
3838
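/* Doorbell BAR: Lancer and VFs use BAR 0, other PFs use BAR 4 */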
3839 static int db_bar(struct be_adapter *adapter)
3840 {
3841 if (lancer_chip(adapter) || !be_physfn(adapter))
3842 return 0;
3843 else
3844 return 4;
3845 }
3846
3847 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3848 {
3849 if (skyhawk_chip(adapter)) {
3850 adapter->roce_db.size = 4096;
3851 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3852 db_bar(adapter));
3853 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3854 db_bar(adapter));
3855 }
3856 return 0;
3857 }
3858
3859 static int be_map_pci_bars(struct be_adapter *adapter)
3860 {
3861 u8 __iomem *addr;
3862 u32 sli_intf;
3863
3864 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3865 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3866 SLI_INTF_IF_TYPE_SHIFT;
3867
3868 if (BEx_chip(adapter) && be_physfn(adapter)) {
3869 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3870 if (adapter->csr == NULL)
3871 return -ENOMEM;
3872 }
3873
3874 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3875 if (addr == NULL)
3876 goto pci_map_err;
3877 adapter->db = addr;
3878
3879 be_roce_map_pci_bars(adapter);
3880 return 0;
3881
3882 pci_map_err:
3883 be_unmap_pci_bars(adapter);
3884 return -ENOMEM;
3885 }
3886
3887 static void be_ctrl_cleanup(struct be_adapter *adapter)
3888 {
3889 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3890
3891 be_unmap_pci_bars(adapter);
3892
3893 if (mem->va)
3894 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3895 mem->dma);
3896
3897 mem = &adapter->rx_filter;
3898 if (mem->va)
3899 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3900 mem->dma);
3901 }
3902
3903 static int be_ctrl_init(struct be_adapter *adapter)
3904 {
3905 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3906 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3907 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3908 u32 sli_intf;
3909 int status;
3910
3911 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3912 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3913 SLI_INTF_FAMILY_SHIFT;
3914 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3915
3916 status = be_map_pci_bars(adapter);
3917 if (status)
3918 goto done;
3919
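/* The mailbox must be 16-byte aligned; over-allocate and align the VA/DMA below */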
3920 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3921 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3922 mbox_mem_alloc->size,
3923 &mbox_mem_alloc->dma,
3924 GFP_KERNEL);
3925 if (!mbox_mem_alloc->va) {
3926 status = -ENOMEM;
3927 goto unmap_pci_bars;
3928 }
3929 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3930 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3931 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3932 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3933
3934 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3935 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3936 &rx_filter->dma,
3937 GFP_KERNEL | __GFP_ZERO);
3938 if (rx_filter->va == NULL) {
3939 status = -ENOMEM;
3940 goto free_mbox;
3941 }
3942
3943 mutex_init(&adapter->mbox_lock);
3944 spin_lock_init(&adapter->mcc_lock);
3945 spin_lock_init(&adapter->mcc_cq_lock);
3946
3947 init_completion(&adapter->flash_compl);
3948 pci_save_state(adapter->pdev);
3949 return 0;
3950
3951 free_mbox:
3952 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3953 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3954
3955 unmap_pci_bars:
3956 be_unmap_pci_bars(adapter);
3957
3958 done:
3959 return status;
3960 }
3961
3962 static void be_stats_cleanup(struct be_adapter *adapter)
3963 {
3964 struct be_dma_mem *cmd = &adapter->stats_cmd;
3965
3966 if (cmd->va)
3967 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3968 cmd->va, cmd->dma);
3969 }
3970
3971 static int be_stats_init(struct be_adapter *adapter)
3972 {
3973 struct be_dma_mem *cmd = &adapter->stats_cmd;
3974
3975 if (lancer_chip(adapter))
3976 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3977 else if (BE2_chip(adapter))
3978 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3979 else
3980 /* BE3 and Skyhawk */
3981 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3982
3983 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3984 GFP_KERNEL | __GFP_ZERO);
3985 if (cmd->va == NULL)
3986 return -1;
3987 return 0;
3988 }
3989
3990 static void be_remove(struct pci_dev *pdev)
3991 {
3992 struct be_adapter *adapter = pci_get_drvdata(pdev);
3993
3994 if (!adapter)
3995 return;
3996
3997 be_roce_dev_remove(adapter);
3998 be_intr_set(adapter, false);
3999
4000 cancel_delayed_work_sync(&adapter->func_recovery_work);
4001
4002 unregister_netdev(adapter->netdev);
4003
4004 be_clear(adapter);
4005
4006 /* tell fw we're done with firing cmds */
4007 be_cmd_fw_clean(adapter);
4008
4009 be_stats_cleanup(adapter);
4010
4011 be_ctrl_cleanup(adapter);
4012
4013 pci_disable_pcie_error_reporting(pdev);
4014
4015 pci_set_drvdata(pdev, NULL);
4016 pci_release_regions(pdev);
4017 pci_disable_device(pdev);
4018
4019 free_netdev(adapter->netdev);
4020 }
4021
4022 bool be_is_wol_supported(struct be_adapter *adapter)
4023 {
4024 return (adapter->wol_cap & BE_WOL_CAP) &&
4025 !be_is_wol_excluded(adapter);
4026 }
4027
4028 u32 be_get_fw_log_level(struct be_adapter *adapter)
4029 {
4030 struct be_dma_mem extfat_cmd;
4031 struct be_fat_conf_params *cfgs;
4032 int status;
4033 u32 level = 0;
4034 int j;
4035
4036 if (lancer_chip(adapter))
4037 return 0;
4038
4039 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4040 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4041 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4042 &extfat_cmd.dma);
4043
4044 if (!extfat_cmd.va) {
4045 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4046 __func__);
4047 goto err;
4048 }
4049
4050 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4051 if (!status) {
4052 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4053 sizeof(struct be_cmd_resp_hdr));
4054 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4055 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4056 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4057 }
4058 }
4059 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4060 extfat_cmd.dma);
4061 err:
4062 return level;
4063 }
4064
4065 static int be_get_initial_config(struct be_adapter *adapter)
4066 {
4067 int status;
4068 u32 level;
4069
4070 status = be_cmd_get_cntl_attributes(adapter);
4071 if (status)
4072 return status;
4073
4074 status = be_cmd_get_acpi_wol_cap(adapter);
4075 if (status) {
4076 /* In case of a failure to get WOL capabilities,
4077 * check the exclusion list to determine WOL capability */
4078 if (!be_is_wol_excluded(adapter))
4079 adapter->wol_cap |= BE_WOL_CAP;
4080 }
4081
4082 if (be_is_wol_supported(adapter))
4083 adapter->wol = true;
4084
4085 /* Must be a power of 2 or else MODULO will BUG_ON */
4086 adapter->be_get_temp_freq = 64;
4087
4088 level = be_get_fw_log_level(adapter);
4089 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4090
4091 return 0;
4092 }
4093
4094 static int lancer_recover_func(struct be_adapter *adapter)
4095 {
4096 int status;
4097
4098 status = lancer_test_and_set_rdy_state(adapter);
4099 if (status)
4100 goto err;
4101
4102 if (netif_running(adapter->netdev))
4103 be_close(adapter->netdev);
4104
4105 be_clear(adapter);
4106
4107 adapter->hw_error = false;
4108 adapter->fw_timeout = false;
4109
4110 status = be_setup(adapter);
4111 if (status)
4112 goto err;
4113
4114 if (netif_running(adapter->netdev)) {
4115 status = be_open(adapter->netdev);
4116 if (status)
4117 goto err;
4118 }
4119
4120 dev_info(&adapter->pdev->dev,
4121 "Adapter SLIPORT recovery succeeded\n");
4122 return 0;
4123 err:
4124 if (adapter->eeh_error)
4125 dev_err(&adapter->pdev->dev,
4126 "Adapter SLIPORT recovery failed\n");
4127
4128 return status;
4129 }
4130
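/* Runs every second: detects adapter errors and, on Lancer, attempts SLIPORT recovery */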
4131 static void be_func_recovery_task(struct work_struct *work)
4132 {
4133 struct be_adapter *adapter =
4134 container_of(work, struct be_adapter, func_recovery_work.work);
4135 int status;
4136
4137 be_detect_error(adapter);
4138
4139 if (adapter->hw_error && lancer_chip(adapter)) {
4140
4141 if (adapter->eeh_error)
4142 goto out;
4143
4144 rtnl_lock();
4145 netif_device_detach(adapter->netdev);
4146 rtnl_unlock();
4147
4148 status = lancer_recover_func(adapter);
4149
4150 if (!status)
4151 netif_device_attach(adapter->netdev);
4152 }
4153
4154 out:
4155 schedule_delayed_work(&adapter->func_recovery_work,
4156 msecs_to_jiffies(1000));
4157 }
4158
4159 static void be_worker(struct work_struct *work)
4160 {
4161 struct be_adapter *adapter =
4162 container_of(work, struct be_adapter, work.work);
4163 struct be_rx_obj *rxo;
4164 struct be_eq_obj *eqo;
4165 int i;
4166
4167 /* when interrupts are not yet enabled, just reap any pending
4168 * mcc completions */
4169 if (!netif_running(adapter->netdev)) {
4170 local_bh_disable();
4171 be_process_mcc(adapter);
4172 local_bh_enable();
4173 goto reschedule;
4174 }
4175
4176 if (!adapter->stats_cmd_sent) {
4177 if (lancer_chip(adapter))
4178 lancer_cmd_get_pport_stats(adapter,
4179 &adapter->stats_cmd);
4180 else
4181 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4182 }
4183
4184 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4185 be_cmd_get_die_temperature(adapter);
4186
4187 for_all_rx_queues(adapter, rxo, i) {
4188 if (rxo->rx_post_starved) {
4189 rxo->rx_post_starved = false;
4190 be_post_rx_frags(rxo, GFP_KERNEL);
4191 }
4192 }
4193
4194 for_all_evt_queues(adapter, eqo, i)
4195 be_eqd_update(adapter, eqo);
4196
4197 reschedule:
4198 adapter->work_counter++;
4199 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4200 }
4201
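/* A function reset at probe time is skipped if VFs are already enabled */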
4202 static bool be_reset_required(struct be_adapter *adapter)
4203 {
4204 return be_find_vfs(adapter, ENABLED) <= 0;
4205 }
4206
4207 static char *mc_name(struct be_adapter *adapter)
4208 {
4209 if (adapter->function_mode & FLEX10_MODE)
4210 return "FLEX10";
4211 else if (adapter->function_mode & VNIC_MODE)
4212 return "vNIC";
4213 else if (adapter->function_mode & UMC_ENABLED)
4214 return "UMC";
4215 else
4216 return "";
4217 }
4218
4219 static inline char *func_name(struct be_adapter *adapter)
4220 {
4221 return be_physfn(adapter) ? "PF" : "VF";
4222 }
4223
4224 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4225 {
4226 int status = 0;
4227 struct be_adapter *adapter;
4228 struct net_device *netdev;
4229 char port_name;
4230
4231 status = pci_enable_device(pdev);
4232 if (status)
4233 goto do_none;
4234
4235 status = pci_request_regions(pdev, DRV_NAME);
4236 if (status)
4237 goto disable_dev;
4238 pci_set_master(pdev);
4239
4240 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4241 if (netdev == NULL) {
4242 status = -ENOMEM;
4243 goto rel_reg;
4244 }
4245 adapter = netdev_priv(netdev);
4246 adapter->pdev = pdev;
4247 pci_set_drvdata(pdev, adapter);
4248 adapter->netdev = netdev;
4249 SET_NETDEV_DEV(netdev, &pdev->dev);
4250
4251 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4252 if (!status) {
4253 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4254 if (status < 0) {
4255 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4256 goto free_netdev;
4257 }
4258 netdev->features |= NETIF_F_HIGHDMA;
4259 } else {
4260 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4261 if (status) {
4262 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4263 goto free_netdev;
4264 }
4265 }
4266
4267 status = pci_enable_pcie_error_reporting(pdev);
4268 if (status)
4269 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4270
4271 status = be_ctrl_init(adapter);
4272 if (status)
4273 goto free_netdev;
4274
4275 /* sync up with fw's ready state */
4276 if (be_physfn(adapter)) {
4277 status = be_fw_wait_ready(adapter);
4278 if (status)
4279 goto ctrl_clean;
4280 }
4281
4282 if (be_reset_required(adapter)) {
4283 status = be_cmd_reset_function(adapter);
4284 if (status)
4285 goto ctrl_clean;
4286
4287 /* Wait for interrupts to quiesce after an FLR */
4288 msleep(100);
4289 }
4290
4291 /* Allow interrupts for other ULPs running on NIC function */
4292 be_intr_set(adapter, true);
4293
4294 /* tell fw we're ready to fire cmds */
4295 status = be_cmd_fw_init(adapter);
4296 if (status)
4297 goto ctrl_clean;
4298
4299 status = be_stats_init(adapter);
4300 if (status)
4301 goto ctrl_clean;
4302
4303 status = be_get_initial_config(adapter);
4304 if (status)
4305 goto stats_clean;
4306
4307 INIT_DELAYED_WORK(&adapter->work, be_worker);
4308 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4309 adapter->rx_fc = adapter->tx_fc = true;
4310
4311 status = be_setup(adapter);
4312 if (status)
4313 goto stats_clean;
4314
4315 be_netdev_init(netdev);
4316 status = register_netdev(netdev);
4317 if (status != 0)
4318 goto unsetup;
4319
4320 be_roce_dev_add(adapter);
4321
4322 schedule_delayed_work(&adapter->func_recovery_work,
4323 msecs_to_jiffies(1000));
4324
4325 be_cmd_query_port_name(adapter, &port_name);
4326
4327 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4328 func_name(adapter), mc_name(adapter), port_name);
4329
4330 return 0;
4331
4332 unsetup:
4333 be_clear(adapter);
4334 stats_clean:
4335 be_stats_cleanup(adapter);
4336 ctrl_clean:
4337 be_ctrl_cleanup(adapter);
4338 free_netdev:
4339 free_netdev(netdev);
4340 pci_set_drvdata(pdev, NULL);
4341 rel_reg:
4342 pci_release_regions(pdev);
4343 disable_dev:
4344 pci_disable_device(pdev);
4345 do_none:
4346 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4347 return status;
4348 }
4349
4350 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4351 {
4352 struct be_adapter *adapter = pci_get_drvdata(pdev);
4353 struct net_device *netdev = adapter->netdev;
4354
4355 if (adapter->wol)
4356 be_setup_wol(adapter, true);
4357
4358 cancel_delayed_work_sync(&adapter->func_recovery_work);
4359
4360 netif_device_detach(netdev);
4361 if (netif_running(netdev)) {
4362 rtnl_lock();
4363 be_close(netdev);
4364 rtnl_unlock();
4365 }
4366 be_clear(adapter);
4367
4368 pci_save_state(pdev);
4369 pci_disable_device(pdev);
4370 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4371 return 0;
4372 }
4373
4374 static int be_resume(struct pci_dev *pdev)
4375 {
4376 int status = 0;
4377 struct be_adapter *adapter = pci_get_drvdata(pdev);
4378 struct net_device *netdev = adapter->netdev;
4379
4380 netif_device_detach(netdev);
4381
4382 status = pci_enable_device(pdev);
4383 if (status)
4384 return status;
4385
4386 pci_set_power_state(pdev, PCI_D0);
4387 pci_restore_state(pdev);
4388
4389 /* tell fw we're ready to fire cmds */
4390 status = be_cmd_fw_init(adapter);
4391 if (status)
4392 return status;
4393
4394 be_setup(adapter);
4395 if (netif_running(netdev)) {
4396 rtnl_lock();
4397 be_open(netdev);
4398 rtnl_unlock();
4399 }
4400
4401 schedule_delayed_work(&adapter->func_recovery_work,
4402 msecs_to_jiffies(1000));
4403 netif_device_attach(netdev);
4404
4405 if (adapter->wol)
4406 be_setup_wol(adapter, false);
4407
4408 return 0;
4409 }
4410
4411 /*
4412 * An FLR will stop BE from DMAing any data.
4413 */
4414 static void be_shutdown(struct pci_dev *pdev)
4415 {
4416 struct be_adapter *adapter = pci_get_drvdata(pdev);
4417
4418 if (!adapter)
4419 return;
4420
4421 cancel_delayed_work_sync(&adapter->work);
4422 cancel_delayed_work_sync(&adapter->func_recovery_work);
4423
4424 netif_device_detach(adapter->netdev);
4425
4426 be_cmd_reset_function(adapter);
4427
4428 pci_disable_device(pdev);
4429 }
4430
4431 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4432 pci_channel_state_t state)
4433 {
4434 struct be_adapter *adapter = pci_get_drvdata(pdev);
4435 struct net_device *netdev = adapter->netdev;
4436
4437 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4438
4439 adapter->eeh_error = true;
4440
4441 cancel_delayed_work_sync(&adapter->func_recovery_work);
4442
4443 rtnl_lock();
4444 netif_device_detach(netdev);
4445 rtnl_unlock();
4446
4447 if (netif_running(netdev)) {
4448 rtnl_lock();
4449 be_close(netdev);
4450 rtnl_unlock();
4451 }
4452 be_clear(adapter);
4453
4454 if (state == pci_channel_io_perm_failure)
4455 return PCI_ERS_RESULT_DISCONNECT;
4456
4457 pci_disable_device(pdev);
4458
4459 /* The error could cause the FW to trigger a flash debug dump.
4460 * Resetting the card while flash dump is in progress
4461 * can cause it not to recover; wait for it to finish.
4462 * Wait only for first function as it is needed only once per
4463 * adapter.
4464 */
4465 if (pdev->devfn == 0)
4466 ssleep(30);
4467
4468 return PCI_ERS_RESULT_NEED_RESET;
4469 }
4470
4471 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4472 {
4473 struct be_adapter *adapter = pci_get_drvdata(pdev);
4474 int status;
4475
4476 dev_info(&adapter->pdev->dev, "EEH reset\n");
4477 be_clear_all_error(adapter);
4478
4479 status = pci_enable_device(pdev);
4480 if (status)
4481 return PCI_ERS_RESULT_DISCONNECT;
4482
4483 pci_set_master(pdev);
4484 pci_set_power_state(pdev, PCI_D0);
4485 pci_restore_state(pdev);
4486
4487 /* Check if card is ok and fw is ready */
4488 dev_info(&adapter->pdev->dev,
4489 "Waiting for FW to be ready after EEH reset\n");
4490 status = be_fw_wait_ready(adapter);
4491 if (status)
4492 return PCI_ERS_RESULT_DISCONNECT;
4493
4494 pci_cleanup_aer_uncorrect_error_status(pdev);
4495 return PCI_ERS_RESULT_RECOVERED;
4496 }
4497
4498 static void be_eeh_resume(struct pci_dev *pdev)
4499 {
4500 int status = 0;
4501 struct be_adapter *adapter = pci_get_drvdata(pdev);
4502 struct net_device *netdev = adapter->netdev;
4503
4504 dev_info(&adapter->pdev->dev, "EEH resume\n");
4505
4506 pci_save_state(pdev);
4507
4508 status = be_cmd_reset_function(adapter);
4509 if (status)
4510 goto err;
4511
4512 /* tell fw we're ready to fire cmds */
4513 status = be_cmd_fw_init(adapter);
4514 if (status)
4515 goto err;
4516
4517 status = be_setup(adapter);
4518 if (status)
4519 goto err;
4520
4521 if (netif_running(netdev)) {
4522 status = be_open(netdev);
4523 if (status)
4524 goto err;
4525 }
4526
4527 schedule_delayed_work(&adapter->func_recovery_work,
4528 msecs_to_jiffies(1000));
4529 netif_device_attach(netdev);
4530 return;
4531 err:
4532 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4533 }
4534
4535 static const struct pci_error_handlers be_eeh_handlers = {
4536 .error_detected = be_eeh_err_detected,
4537 .slot_reset = be_eeh_reset,
4538 .resume = be_eeh_resume,
4539 };
4540
4541 static struct pci_driver be_driver = {
4542 .name = DRV_NAME,
4543 .id_table = be_dev_ids,
4544 .probe = be_probe,
4545 .remove = be_remove,
4546 .suspend = be_suspend,
4547 .resume = be_resume,
4548 .shutdown = be_shutdown,
4549 .err_handler = &be_eeh_handlers
4550 };
4551
4552 static int __init be_init_module(void)
4553 {
4554 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4555 rx_frag_size != 2048) {
4556 printk(KERN_WARNING DRV_NAME
4557 " : Module param rx_frag_size must be 2048/4096/8192."
4558 " Using 2048\n");
4559 rx_frag_size = 2048;
4560 }
4561
4562 return pci_register_driver(&be_driver);
4563 }
4564 module_init(be_init_module);
4565
4566 static void __exit be_exit_module(void)
4567 {
4568 pci_unregister_driver(&be_driver);
4569 }
4570 module_exit(be_exit_module);