be2net: fix max VFs reported by HW
[deliverable/linux.git] / drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 { 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 "LPCMEMHOST",
87 "MGMT_MAC",
88 "PCS0ONLINE",
89 "MPU_IRAM",
90 "PCS1ONLINE",
91 "PCTL0",
92 "PCTL1",
93 "PMEM",
94 "RR",
95 "TXPB",
96 "RXPP",
97 "XAUI",
98 "TXP",
99 "ARM",
100 "IPC",
101 "HOST2",
102 "HOST3",
103 "HOST4",
104 "HOST5",
105 "HOST6",
106 "HOST7",
107 "HOST8",
108 "HOST9",
109 "NETC",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown"
118 };
119
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter)
{
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125 }
126
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va) {
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
133 mem->va = NULL;
134 }
135 }
136
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139 {
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
148 if (!mem->va)
149 return -ENOMEM;
150 memset(mem->va, 0, mem->size);
151 return 0;
152 }
153
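/* Enable or disable host interrupt delivery by toggling the HOSTINTR bit
 * in the MEMBAR interrupt-control register in PCI config space.
 */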
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
155 {
156 u32 reg, enabled;
157
158 if (adapter->eeh_error)
159 return;
160
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165 if (!enabled && enable)
166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else if (enabled && !enable)
168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 else
170 return;
171
172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181
182 wmb();
183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191
192 wmb();
193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197 bool arm, bool clear_int, u16 num_popped)
198 {
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
203
204 if (adapter->eeh_error)
205 return;
206
207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
214 }
215
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217 {
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
222
223 if (adapter->eeh_error)
224 return;
225
226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
230 }
231
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
233 {
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
236 int status = 0;
237 u8 current_mac[ETH_ALEN];
238 u32 pmac_id = adapter->pmac_id[0];
239
240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
242
243 status = be_cmd_mac_addr_query(adapter, current_mac,
244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
246 if (status)
247 goto err;
248
249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251 adapter->if_handle, &adapter->pmac_id[0], 0);
252 if (status)
253 goto err;
254
255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259 err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261 return status;
262 }
263
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269 struct be_port_rxf_stats_v0 *port_stats =
270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
272
273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
301 drvs->jabber_events = rxf_stats->port1_jabber_events;
302 else
303 drvs->jabber_events = rxf_stats->port0_jabber_events;
304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318 struct be_port_rxf_stats_v1 *port_stats =
319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
321
322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
360
361 struct be_drv_stats *drvs = &adapter->drv_stats;
362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_mismatch_drops =
384 pport_stats->rx_address_mismatch_drops +
385 pport_stats->rx_vlan_mismatch_drops;
386 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390 drvs->jabber_events = pport_stats->rx_jabbers;
391 drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393 drvs->rx_drops_too_many_frags =
394 pport_stats->rx_drops_too_many_frags_lo;
395 }
396
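/* Fold a 16-bit HW counter that wraps at 65535 into the driver's 32-bit
 * software accumulator.
 */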
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x) (x & 0xFFFF)
400 #define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407 }
408
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
421 populate_be2_stats(adapter);
422 }
423
424 if (lancer_chip(adapter))
425 goto done;
426
427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
435 done:
436 return;
437 }
438
439 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 struct rtnl_link_stats64 *stats)
441 {
442 struct be_adapter *adapter = netdev_priv(netdev);
443 struct be_drv_stats *drvs = &adapter->drv_stats;
444 struct be_rx_obj *rxo;
445 struct be_tx_obj *txo;
446 u64 pkts, bytes;
447 unsigned int start;
448 int i;
449
450 for_all_rx_queues(adapter, rxo, i) {
451 const struct be_rx_stats *rx_stats = rx_stats(rxo);
452 do {
453 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 pkts = rx_stats(rxo)->rx_pkts;
455 bytes = rx_stats(rxo)->rx_bytes;
456 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457 stats->rx_packets += pkts;
458 stats->rx_bytes += bytes;
459 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 rx_stats(rxo)->rx_drops_no_frags;
462 }
463
464 for_all_tx_queues(adapter, txo, i) {
465 const struct be_tx_stats *tx_stats = tx_stats(txo);
466 do {
467 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 pkts = tx_stats(txo)->tx_pkts;
469 bytes = tx_stats(txo)->tx_bytes;
470 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 stats->tx_packets += pkts;
472 stats->tx_bytes += bytes;
473 }
474
475 /* bad pkts received */
476 stats->rx_errors = drvs->rx_crc_errors +
477 drvs->rx_alignment_symbol_errors +
478 drvs->rx_in_range_errors +
479 drvs->rx_out_range_errors +
480 drvs->rx_frame_too_long +
481 drvs->rx_dropped_too_small +
482 drvs->rx_dropped_too_short +
483 drvs->rx_dropped_header_too_small +
484 drvs->rx_dropped_tcp_length +
485 drvs->rx_dropped_runt;
486
487 /* detailed rx errors */
488 stats->rx_length_errors = drvs->rx_in_range_errors +
489 drvs->rx_out_range_errors +
490 drvs->rx_frame_too_long;
491
492 stats->rx_crc_errors = drvs->rx_crc_errors;
493
494 /* frame alignment errors */
495 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
496
497 /* receiver fifo overrun */
498 /* drops_no_pbuf is not per i/f, it's per BE card */
499 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
500 drvs->rx_input_fifo_overflow_drop +
501 drvs->rx_drops_no_pbuf;
502 return stats;
503 }
504
505 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
506 {
507 struct net_device *netdev = adapter->netdev;
508
509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
510 netif_carrier_off(netdev);
511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
512 }
513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
518 }
519
520 static void be_tx_stats_update(struct be_tx_obj *txo,
521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
522 {
523 struct be_tx_stats *stats = tx_stats(txo);
524
525 u64_stats_update_begin(&stats->sync);
526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
530 if (stopped)
531 stats->tx_stops++;
532 u64_stats_update_end(&stats->sync);
533 }
534
535 /* Determine number of WRB entries needed to xmit data in an skb */
536 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
538 {
539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
543 /* to account for hdr wrb */
544 cnt++;
545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
551 }
552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554 }
555
556 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557 {
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 wrb->rsvd0 = 0;
562 }
563
564 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566 {
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578 }
579
580 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581 {
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583 }
584
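/* Fill the Tx header WRB with the LSO, checksum-offload and VLAN settings
 * derived from the skb.
 */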
585 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
586 struct sk_buff *skb, u32 wrb_cnt, u32 len)
587 {
588 u16 vlan_tag;
589
590 memset(hdr, 0, sizeof(*hdr));
591
592 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
593
594 if (skb_is_gso(skb)) {
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
597 hdr, skb_shinfo(skb)->gso_size);
598 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
600 if (lancer_chip(adapter) && adapter->sli_family ==
601 LANCER_A0_SLI_FAMILY) {
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
603 if (is_tcp_pkt(skb))
604 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
605 tcpcs, hdr, 1);
606 else if (is_udp_pkt(skb))
607 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
608 udpcs, hdr, 1);
609 }
610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
611 if (is_tcp_pkt(skb))
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
613 else if (is_udp_pkt(skb))
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
615 }
616
617 if (vlan_tx_tag_present(skb)) {
618 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
619 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
620 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
621 }
622
623 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
624 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
625 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
626 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
627 }
628
629 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
630 bool unmap_single)
631 {
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
637 if (wrb->frag_len) {
638 if (unmap_single)
639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
641 else
642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
643 }
644 }
645
646 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
647 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
648 {
649 dma_addr_t busaddr;
650 int i, copied = 0;
651 struct device *dev = &adapter->pdev->dev;
652 struct sk_buff *first_skb = skb;
653 struct be_eth_wrb *wrb;
654 struct be_eth_hdr_wrb *hdr;
655 bool map_single = false;
656 u16 map_head;
657
658 hdr = queue_head_node(txq);
659 queue_head_inc(txq);
660 map_head = txq->head;
661
662 if (skb->len > skb->data_len) {
663 int len = skb_headlen(skb);
664 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
665 if (dma_mapping_error(dev, busaddr))
666 goto dma_err;
667 map_single = true;
668 wrb = queue_head_node(txq);
669 wrb_fill(wrb, busaddr, len);
670 be_dws_cpu_to_le(wrb, sizeof(*wrb));
671 queue_head_inc(txq);
672 copied += len;
673 }
674
675 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
676 const struct skb_frag_struct *frag =
677 &skb_shinfo(skb)->frags[i];
678 busaddr = skb_frag_dma_map(dev, frag, 0,
679 skb_frag_size(frag), DMA_TO_DEVICE);
680 if (dma_mapping_error(dev, busaddr))
681 goto dma_err;
682 wrb = queue_head_node(txq);
683 wrb_fill(wrb, busaddr, skb_frag_size(frag));
684 be_dws_cpu_to_le(wrb, sizeof(*wrb));
685 queue_head_inc(txq);
686 copied += skb_frag_size(frag);
687 }
688
689 if (dummy_wrb) {
690 wrb = queue_head_node(txq);
691 wrb_fill(wrb, 0, 0);
692 be_dws_cpu_to_le(wrb, sizeof(*wrb));
693 queue_head_inc(txq);
694 }
695
696 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
697 be_dws_cpu_to_le(hdr, sizeof(*hdr));
698
699 return copied;
700 dma_err:
701 txq->head = map_head;
702 while (copied) {
703 wrb = queue_head_node(txq);
704 unmap_tx_frag(dev, wrb, map_single);
705 map_single = false;
706 copied -= wrb->frag_len;
707 queue_head_inc(txq);
708 }
709 return 0;
710 }
711
712 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714 {
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728 }
729
730 static netdev_tx_t be_xmit(struct sk_buff *skb,
731 struct net_device *netdev)
732 {
733 struct be_adapter *adapter = netdev_priv(netdev);
734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 struct be_queue_info *txq = &txo->q;
736 struct iphdr *ip = NULL;
737 u32 wrb_cnt = 0, copied = 0;
738 u32 start = txq->head, eth_hdr_len;
739 bool dummy_wrb, stopped = false;
740
741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 VLAN_ETH_HLEN : ETH_HLEN;
743
744 /* HW has a bug which considers padding bytes as legal
745 * and modifies the IPv4 hdr's 'tot_len' field
746 */
747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
748 is_ipv4_pkt(skb)) {
749 ip = (struct iphdr *)ip_hdr(skb);
750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
751 }
752
753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
760 if (unlikely(!skb))
761 goto tx_drop;
762 }
763
764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
765
766 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
767 if (copied) {
768 int gso_segs = skb_shinfo(skb)->gso_segs;
769
770 /* record the sent skb in the sent_skb table */
771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
773
774 /* Ensure txq has space for the next skb; Else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialize the
776 * tx compls of the current transmit which'll wake up the queue
777 */
778 atomic_add(wrb_cnt, &txq->used);
779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 txq->len) {
781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
782 stopped = true;
783 }
784
785 be_txq_notify(adapter, txq->id, wrb_cnt);
786
787 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
788 } else {
789 txq->head = start;
790 dev_kfree_skb_any(skb);
791 }
792 tx_drop:
793 return NETDEV_TX_OK;
794 }
795
796 static int be_change_mtu(struct net_device *netdev, int new_mtu)
797 {
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812 }
813
814 /*
815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
817 */
818 static int be_vid_config(struct be_adapter *adapter)
819 {
820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
822 int status = 0;
823
824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
834 vids[num++] = cpu_to_le16(i);
835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
837 vids, num, 1, 0);
838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
844 }
845
846 return status;
847
848 set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
852 }
853
854 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
855 {
856 struct be_adapter *adapter = netdev_priv(netdev);
857 int status = 0;
858
859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
863
864 adapter->vlan_tag[vid] = 1;
865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 status = be_vid_config(adapter);
867
868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872 ret:
873 return status;
874 }
875
876 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
877 {
878 struct be_adapter *adapter = netdev_priv(netdev);
879 int status = 0;
880
881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
885
886 adapter->vlan_tag[vid] = 0;
887 if (adapter->vlans_added <= adapter->max_vlans)
888 status = be_vid_config(adapter);
889
890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894 ret:
895 return status;
896 }
897
898 static void be_set_rx_mode(struct net_device *netdev)
899 {
900 struct be_adapter *adapter = netdev_priv(netdev);
901 int status;
902
903 if (netdev->flags & IFF_PROMISC) {
904 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
905 adapter->promiscuous = true;
906 goto done;
907 }
908
909 /* BE was previously in promiscuous mode; disable it */
910 if (adapter->promiscuous) {
911 adapter->promiscuous = false;
912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
913
914 if (adapter->vlans_added)
915 be_vid_config(adapter);
916 }
917
918 /* Enable multicast promisc if num configured exceeds what we support */
919 if (netdev->flags & IFF_ALLMULTI ||
920 netdev_mc_count(netdev) > BE_MAX_MC) {
921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
922 goto done;
923 }
924
925 if (netdev_uc_count(netdev) != adapter->uc_macs) {
926 struct netdev_hw_addr *ha;
927 int i = 1; /* First slot is claimed by the Primary MAC */
928
929 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
930 be_cmd_pmac_del(adapter, adapter->if_handle,
931 adapter->pmac_id[i], 0);
932 }
933
934 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
935 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
936 adapter->promiscuous = true;
937 goto done;
938 }
939
940 netdev_for_each_uc_addr(ha, adapter->netdev) {
941 adapter->uc_macs++; /* First slot is for Primary MAC */
942 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
943 adapter->if_handle,
944 &adapter->pmac_id[adapter->uc_macs], 0);
945 }
946 }
947
948 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
949
950 /* Set to MCAST promisc mode if setting MULTICAST address fails */
951 if (status) {
952 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
953 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
954 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
955 }
956 done:
957 return;
958 }
959
960 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961 {
962 struct be_adapter *adapter = netdev_priv(netdev);
963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
964 int status;
965
966 if (!sriov_enabled(adapter))
967 return -EPERM;
968
969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
970 return -EINVAL;
971
972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
977
978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
980 }
981
982 if (status)
983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
985 else
986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
987
988 return status;
989 }
990
991 static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993 {
994 struct be_adapter *adapter = netdev_priv(netdev);
995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
996
997 if (!sriov_enabled(adapter))
998 return -EPERM;
999
1000 if (vf >= adapter->num_vfs)
1001 return -EINVAL;
1002
1003 vi->vf = vf;
1004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
1006 vi->qos = 0;
1007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1008
1009 return 0;
1010 }
1011
1012 static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014 {
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
1018 if (!sriov_enabled(adapter))
1019 return -EPERM;
1020
1021 if (vf >= adapter->num_vfs || vlan > 4095)
1022 return -EINVAL;
1023
1024 if (vlan) {
1025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
1032 } else {
1033 /* Reset Transparent Vlan Tagging. */
1034 adapter->vf_cfg[vf].vlan_tag = 0;
1035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
1038 }
1039
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045 }
1046
1047 static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049 {
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
1053 if (!sriov_enabled(adapter))
1054 return -EPERM;
1055
1056 if (vf >= adapter->num_vfs)
1057 return -EINVAL;
1058
1059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
1064
1065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1066
1067 if (status)
1068 dev_err(&adapter->pdev->dev,
1069 "tx rate %d on VF %d failed\n", rate, vf);
1070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
1072 return status;
1073 }
1074
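/* Count this PF's VFs (all of them, or only the guest-assigned ones) by
 * walking the PCI bus using the SR-IOV first-VF offset and stride.
 */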
1075 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076 {
1077 struct pci_dev *dev, *pdev = adapter->pdev;
1078 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1079 u16 offset, stride;
1080
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1082 if (!pos)
1083 return 0;
1084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1086
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 while (dev) {
1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1092 vfs++;
1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1094 assigned_vfs++;
1095 }
1096 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1097 }
1098 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1099 }
1100
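/* When adaptive interrupt coalescing is enabled, recompute the EQ delay once
 * a second from the observed rx packet rate and program it only if it changed.
 */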
1101 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1102 {
1103 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1104 ulong now = jiffies;
1105 ulong delta = now - stats->rx_jiffies;
1106 u64 pkts;
1107 unsigned int start, eqd;
1108
1109 if (!eqo->enable_aic) {
1110 eqd = eqo->eqd;
1111 goto modify_eqd;
1112 }
1113
1114 if (eqo->idx >= adapter->num_rx_qs)
1115 return;
1116
1117 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1118
1119 /* Wrapped around */
1120 if (time_before(now, stats->rx_jiffies)) {
1121 stats->rx_jiffies = now;
1122 return;
1123 }
1124
1125 /* Update once a second */
1126 if (delta < HZ)
1127 return;
1128
1129 do {
1130 start = u64_stats_fetch_begin_bh(&stats->sync);
1131 pkts = stats->rx_pkts;
1132 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1133
1134 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1135 stats->rx_pkts_prev = pkts;
1136 stats->rx_jiffies = now;
1137 eqd = (stats->rx_pps / 110000) << 3;
1138 eqd = min(eqd, eqo->max_eqd);
1139 eqd = max(eqd, eqo->min_eqd);
1140 if (eqd < 10)
1141 eqd = 0;
1142
1143 modify_eqd:
1144 if (eqd != eqo->cur_eqd) {
1145 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1146 eqo->cur_eqd = eqd;
1147 }
1148 }
1149
1150 static void be_rx_stats_update(struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1152 {
1153 struct be_rx_stats *stats = rx_stats(rxo);
1154
1155 u64_stats_update_begin(&stats->sync);
1156 stats->rx_compl++;
1157 stats->rx_bytes += rxcp->pkt_size;
1158 stats->rx_pkts++;
1159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1160 stats->rx_mcast_pkts++;
1161 if (rxcp->err)
1162 stats->rx_compl_err++;
1163 u64_stats_update_end(&stats->sync);
1164 }
1165
1166 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1167 {
1168 /* L4 checksum is not reliable for non-TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
1170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
1172 }
1173
1174 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 u16 frag_idx)
1176 {
1177 struct be_adapter *adapter = rxo->adapter;
1178 struct be_rx_page_info *rx_page_info;
1179 struct be_queue_info *rxq = &rxo->q;
1180
1181 rx_page_info = &rxo->page_info_tbl[frag_idx];
1182 BUG_ON(!rx_page_info->page);
1183
1184 if (rx_page_info->last_page_user) {
1185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
1188 rx_page_info->last_page_user = false;
1189 }
1190
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1193 }
1194
1195 /* Throw away the data in the Rx completion */
1196 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
1198 {
1199 struct be_queue_info *rxq = &rxo->q;
1200 struct be_rx_page_info *page_info;
1201 u16 i, num_rcvd = rxcp->num_rcvd;
1202
1203 for (i = 0; i < num_rcvd; i++) {
1204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
1207 index_inc(&rxcp->rxq_idx, rxq->len);
1208 }
1209 }
1210
1211 /*
1212 * skb_fill_rx_data forms a complete skb for an ether frame
1213 * indicated by rxcp.
1214 */
1215 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1216 struct be_rx_compl_info *rxcp)
1217 {
1218 struct be_queue_info *rxq = &rxo->q;
1219 struct be_rx_page_info *page_info;
1220 u16 i, j;
1221 u16 hdr_len, curr_frag_len, remaining;
1222 u8 *start;
1223
1224 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1225 start = page_address(page_info->page) + page_info->page_offset;
1226 prefetch(start);
1227
1228 /* Copy data in the first descriptor of this completion */
1229 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1230
1231 skb->len = curr_frag_len;
1232 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1233 memcpy(skb->data, start, curr_frag_len);
1234 /* Complete packet has now been moved to data */
1235 put_page(page_info->page);
1236 skb->data_len = 0;
1237 skb->tail += curr_frag_len;
1238 } else {
1239 hdr_len = ETH_HLEN;
1240 memcpy(skb->data, start, hdr_len);
1241 skb_shinfo(skb)->nr_frags = 1;
1242 skb_frag_set_page(skb, 0, page_info->page);
1243 skb_shinfo(skb)->frags[0].page_offset =
1244 page_info->page_offset + hdr_len;
1245 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1246 skb->data_len = curr_frag_len - hdr_len;
1247 skb->truesize += rx_frag_size;
1248 skb->tail += hdr_len;
1249 }
1250 page_info->page = NULL;
1251
1252 if (rxcp->pkt_size <= rx_frag_size) {
1253 BUG_ON(rxcp->num_rcvd != 1);
1254 return;
1255 }
1256
1257 /* More frags present for this completion */
1258 index_inc(&rxcp->rxq_idx, rxq->len);
1259 remaining = rxcp->pkt_size - curr_frag_len;
1260 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1261 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262 curr_frag_len = min(remaining, rx_frag_size);
1263
1264 /* Coalesce all frags from the same physical page in one slot */
1265 if (page_info->page_offset == 0) {
1266 /* Fresh page */
1267 j++;
1268 skb_frag_set_page(skb, j, page_info->page);
1269 skb_shinfo(skb)->frags[j].page_offset =
1270 page_info->page_offset;
1271 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1272 skb_shinfo(skb)->nr_frags++;
1273 } else {
1274 put_page(page_info->page);
1275 }
1276
1277 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1278 skb->len += curr_frag_len;
1279 skb->data_len += curr_frag_len;
1280 skb->truesize += rx_frag_size;
1281 remaining -= curr_frag_len;
1282 index_inc(&rxcp->rxq_idx, rxq->len);
1283 page_info->page = NULL;
1284 }
1285 BUG_ON(j > MAX_SKB_FRAGS);
1286 }
1287
1288 /* Process the RX completion indicated by rxcp when GRO is disabled */
1289 static void be_rx_compl_process(struct be_rx_obj *rxo,
1290 struct be_rx_compl_info *rxcp)
1291 {
1292 struct be_adapter *adapter = rxo->adapter;
1293 struct net_device *netdev = adapter->netdev;
1294 struct sk_buff *skb;
1295
1296 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1297 if (unlikely(!skb)) {
1298 rx_stats(rxo)->rx_drops_no_skbs++;
1299 be_rx_compl_discard(rxo, rxcp);
1300 return;
1301 }
1302
1303 skb_fill_rx_data(rxo, skb, rxcp);
1304
1305 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1306 skb->ip_summed = CHECKSUM_UNNECESSARY;
1307 else
1308 skb_checksum_none_assert(skb);
1309
1310 skb->protocol = eth_type_trans(skb, netdev);
1311 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1312 if (netdev->features & NETIF_F_RXHASH)
1313 skb->rxhash = rxcp->rss_hash;
1314
1316 if (rxcp->vlanf)
1317 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1318
1319 netif_receive_skb(skb);
1320 }
1321
1322 /* Process the RX completion indicated by rxcp when GRO is enabled */
1323 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1324 struct be_rx_compl_info *rxcp)
1325 {
1326 struct be_adapter *adapter = rxo->adapter;
1327 struct be_rx_page_info *page_info;
1328 struct sk_buff *skb = NULL;
1329 struct be_queue_info *rxq = &rxo->q;
1330 u16 remaining, curr_frag_len;
1331 u16 i, j;
1332
1333 skb = napi_get_frags(napi);
1334 if (!skb) {
1335 be_rx_compl_discard(rxo, rxcp);
1336 return;
1337 }
1338
1339 remaining = rxcp->pkt_size;
1340 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1341 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1342
1343 curr_frag_len = min(remaining, rx_frag_size);
1344
1345 /* Coalesce all frags from the same physical page in one slot */
1346 if (i == 0 || page_info->page_offset == 0) {
1347 /* First frag or Fresh page */
1348 j++;
1349 skb_frag_set_page(skb, j, page_info->page);
1350 skb_shinfo(skb)->frags[j].page_offset =
1351 page_info->page_offset;
1352 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1353 } else {
1354 put_page(page_info->page);
1355 }
1356 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1357 skb->truesize += rx_frag_size;
1358 remaining -= curr_frag_len;
1359 index_inc(&rxcp->rxq_idx, rxq->len);
1360 memset(page_info, 0, sizeof(*page_info));
1361 }
1362 BUG_ON(j > MAX_SKB_FRAGS);
1363
1364 skb_shinfo(skb)->nr_frags = j + 1;
1365 skb->len = rxcp->pkt_size;
1366 skb->data_len = rxcp->pkt_size;
1367 skb->ip_summed = CHECKSUM_UNNECESSARY;
1368 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369 if (adapter->netdev->features & NETIF_F_RXHASH)
1370 skb->rxhash = rxcp->rss_hash;
1371
1372 if (rxcp->vlanf)
1373 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1374
1375 napi_gro_frags(napi);
1376 }
1377
1378 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
1380 {
1381 rxcp->pkt_size =
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1387 rxcp->ip_csum =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 rxcp->l4_csum =
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 rxcp->ipv6 =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 rxcp->rxq_idx =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 rxcp->num_rcvd =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 rxcp->pkt_type =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1399 rxcp->rss_hash =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1401 if (rxcp->vlanf) {
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1403 compl);
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 compl);
1406 }
1407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1408 }
1409
1410 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
1412 {
1413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1431 rxcp->rss_hash =
1432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 compl);
1438 }
1439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1440 }
1441
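/* Pop the next valid completion from the Rx CQ, parse it into rxcp using the
 * v0 or v1 layout as appropriate, and sanitize the vlan fields.
 */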
1442 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1443 {
1444 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1445 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1446 struct be_adapter *adapter = rxo->adapter;
1447
1448 /* For checking the valid bit it is Ok to use either definition as the
1449 * valid bit is at the same position in both v0 and v1 Rx compl */
1450 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1451 return NULL;
1452
1453 rmb();
1454 be_dws_le_to_cpu(compl, sizeof(*compl));
1455
1456 if (adapter->be3_native)
1457 be_parse_rx_compl_v1(compl, rxcp);
1458 else
1459 be_parse_rx_compl_v0(compl, rxcp);
1460
1461 if (rxcp->vlanf) {
1462 /* vlanf could be wrongly set in some cards.
1463 * ignore if vtm is not set */
1464 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1465 rxcp->vlanf = 0;
1466
1467 if (!lancer_chip(adapter))
1468 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1469
1470 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1471 !adapter->vlan_tag[rxcp->vlan_tag])
1472 rxcp->vlanf = 0;
1473 }
1474
1475 /* As the compl has been parsed, reset it; we won't touch it again */
1476 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1477
1478 queue_tail_inc(&rxo->cq);
1479 return rxcp;
1480 }
1481
1482 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1483 {
1484 u32 order = get_order(size);
1485
1486 if (order > 0)
1487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
1489 }
1490
1491 /*
1492 * Allocate a page, split it into fragments of size rx_frag_size and post as
1493 * receive buffers to BE
1494 */
1495 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1496 {
1497 struct be_adapter *adapter = rxo->adapter;
1498 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1499 struct be_queue_info *rxq = &rxo->q;
1500 struct page *pagep = NULL;
1501 struct be_eth_rx_d *rxd;
1502 u64 page_dmaaddr = 0, frag_dmaaddr;
1503 u32 posted, page_offset = 0;
1504
1505 page_info = &rxo->page_info_tbl[rxq->head];
1506 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 if (!pagep) {
1508 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1509 if (unlikely(!pagep)) {
1510 rx_stats(rxo)->rx_post_fail++;
1511 break;
1512 }
1513 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 0, adapter->big_page_size,
1515 DMA_FROM_DEVICE);
1516 page_info->page_offset = 0;
1517 } else {
1518 get_page(pagep);
1519 page_info->page_offset = page_offset + rx_frag_size;
1520 }
1521 page_offset = page_info->page_offset;
1522 page_info->page = pagep;
1523 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1524 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525
1526 rxd = queue_head_node(rxq);
1527 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1529
1530 /* Any space left in the current big page for another frag? */
1531 if ((page_offset + rx_frag_size + rx_frag_size) >
1532 adapter->big_page_size) {
1533 pagep = NULL;
1534 page_info->last_page_user = true;
1535 }
1536
1537 prev_page_info = page_info;
1538 queue_head_inc(rxq);
1539 page_info = &rxo->page_info_tbl[rxq->head];
1540 }
1541 if (pagep)
1542 prev_page_info->last_page_user = true;
1543
1544 if (posted) {
1545 atomic_add(posted, &rxq->used);
1546 be_rxq_notify(adapter, rxq->id, posted);
1547 } else if (atomic_read(&rxq->used) == 0) {
1548 /* Let be_worker replenish when memory is available */
1549 rxo->rx_post_starved = true;
1550 }
1551 }
1552
1553 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1554 {
1555 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556
1557 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558 return NULL;
1559
1560 rmb();
1561 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562
1563 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564
1565 queue_tail_inc(tx_cq);
1566 return txcp;
1567 }
1568
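/* Unmap the wrbs of the completed skb, free the skb and return the number of
 * wrbs (including the header wrb) that were reclaimed.
 */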
1569 static u16 be_tx_compl_process(struct be_adapter *adapter,
1570 struct be_tx_obj *txo, u16 last_index)
1571 {
1572 struct be_queue_info *txq = &txo->q;
1573 struct be_eth_wrb *wrb;
1574 struct sk_buff **sent_skbs = txo->sent_skb_list;
1575 struct sk_buff *sent_skb;
1576 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577 bool unmap_skb_hdr = true;
1578
1579 sent_skb = sent_skbs[txq->tail];
1580 BUG_ON(!sent_skb);
1581 sent_skbs[txq->tail] = NULL;
1582
1583 /* skip header wrb */
1584 queue_tail_inc(txq);
1585
1586 do {
1587 cur_index = txq->tail;
1588 wrb = queue_tail_node(txq);
1589 unmap_tx_frag(&adapter->pdev->dev, wrb,
1590 (unmap_skb_hdr && skb_headlen(sent_skb)));
1591 unmap_skb_hdr = false;
1592
1593 num_wrbs++;
1594 queue_tail_inc(txq);
1595 } while (cur_index != last_index);
1596
1597 kfree_skb(sent_skb);
1598 return num_wrbs;
1599 }
1600
1601 /* Return the number of events in the event queue */
1602 static inline int events_get(struct be_eq_obj *eqo)
1603 {
1604 struct be_eq_entry *eqe;
1605 int num = 0;
1606
1607 do {
1608 eqe = queue_tail_node(&eqo->q);
1609 if (eqe->evt == 0)
1610 break;
1611
1612 rmb();
1613 eqe->evt = 0;
1614 num++;
1615 queue_tail_inc(&eqo->q);
1616 } while (true);
1617
1618 return num;
1619 }
1620
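/* Ack the events pending on this EQ and schedule NAPI if any were found; the
 * EQ is re-armed here only when the interrupt was spurious (no events).
 */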
1621 static int event_handle(struct be_eq_obj *eqo)
1622 {
1623 bool rearm = false;
1624 int num = events_get(eqo);
1625
1626 /* Deal with any spurious interrupts that come without events */
1627 if (!num)
1628 rearm = true;
1629
1630 if (num || msix_enabled(eqo->adapter))
1631 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632
1633 if (num)
1634 napi_schedule(&eqo->napi);
1635
1636 return num;
1637 }
1638
1639 /* Leaves the EQ in disarmed state */
1640 static void be_eq_clean(struct be_eq_obj *eqo)
1641 {
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645 }
1646
1647 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1648 {
1649 struct be_rx_page_info *page_info;
1650 struct be_queue_info *rxq = &rxo->q;
1651 struct be_queue_info *rx_cq = &rxo->cq;
1652 struct be_rx_compl_info *rxcp;
1653 u16 tail;
1654
1655 /* First cleanup pending rx completions */
1656 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1657 be_rx_compl_discard(rxo, rxcp);
1658 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1659 }
1660
1661 /* Then free posted rx buffers that were not used */
1662 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1663 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1664 page_info = get_rx_page_info(rxo, tail);
1665 put_page(page_info->page);
1666 memset(page_info, 0, sizeof(*page_info));
1667 }
1668 BUG_ON(atomic_read(&rxq->used));
1669 rxq->tail = rxq->head = 0;
1670 }
1671
1672 static void be_tx_compl_clean(struct be_adapter *adapter)
1673 {
1674 struct be_tx_obj *txo;
1675 struct be_queue_info *txq;
1676 struct be_eth_tx_compl *txcp;
1677 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1678 struct sk_buff *sent_skb;
1679 bool dummy_wrb;
1680 int i, pending_txqs;
1681
1682 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1683 do {
1684 pending_txqs = adapter->num_tx_qs;
1685
1686 for_all_tx_queues(adapter, txo, i) {
1687 txq = &txo->q;
1688 while ((txcp = be_tx_compl_get(&txo->cq))) {
1689 end_idx =
1690 AMAP_GET_BITS(struct amap_eth_tx_compl,
1691 wrb_index, txcp);
1692 num_wrbs += be_tx_compl_process(adapter, txo,
1693 end_idx);
1694 cmpl++;
1695 }
1696 if (cmpl) {
1697 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1698 atomic_sub(num_wrbs, &txq->used);
1699 cmpl = 0;
1700 num_wrbs = 0;
1701 }
1702 if (atomic_read(&txq->used) == 0)
1703 pending_txqs--;
1704 }
1705
1706 if (pending_txqs == 0 || ++timeo > 200)
1707 break;
1708
1709 mdelay(1);
1710 } while (true);
1711
1712 for_all_tx_queues(adapter, txo, i) {
1713 txq = &txo->q;
1714 if (atomic_read(&txq->used))
1715 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1716 atomic_read(&txq->used));
1717
1718 /* free posted tx for which compls will never arrive */
1719 while (atomic_read(&txq->used)) {
1720 sent_skb = txo->sent_skb_list[txq->tail];
1721 end_idx = txq->tail;
1722 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1723 &dummy_wrb);
1724 index_adv(&end_idx, num_wrbs - 1, txq->len);
1725 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1726 atomic_sub(num_wrbs, &txq->used);
1727 }
1728 }
1729 }
1730
1731 static void be_evt_queues_destroy(struct be_adapter *adapter)
1732 {
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
1737 if (eqo->q.created) {
1738 be_eq_clean(eqo);
1739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740 }
1741 be_queue_free(adapter, &eqo->q);
1742 }
1743 }
1744
1745 static int be_evt_queues_create(struct be_adapter *adapter)
1746 {
1747 struct be_queue_info *eq;
1748 struct be_eq_obj *eqo;
1749 int i, rc;
1750
1751 adapter->num_evt_qs = num_irqs(adapter);
1752
1753 for_all_evt_queues(adapter, eqo, i) {
1754 eqo->adapter = adapter;
1755 eqo->tx_budget = BE_TX_BUDGET;
1756 eqo->idx = i;
1757 eqo->max_eqd = BE_MAX_EQD;
1758 eqo->enable_aic = true;
1759
1760 eq = &eqo->q;
1761 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762 sizeof(struct be_eq_entry));
1763 if (rc)
1764 return rc;
1765
1766 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1767 if (rc)
1768 return rc;
1769 }
1770 return 0;
1771 }
1772
1773 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774 {
1775 struct be_queue_info *q;
1776
1777 q = &adapter->mcc_obj.q;
1778 if (q->created)
1779 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1780 be_queue_free(adapter, q);
1781
1782 q = &adapter->mcc_obj.cq;
1783 if (q->created)
1784 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1785 be_queue_free(adapter, q);
1786 }
1787
1788 /* Must be called only after TX qs are created as MCC shares TX EQ */
1789 static int be_mcc_queues_create(struct be_adapter *adapter)
1790 {
1791 struct be_queue_info *q, *cq;
1792
1793 cq = &adapter->mcc_obj.cq;
1794 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1795 sizeof(struct be_mcc_compl)))
1796 goto err;
1797
1798 /* Use the default EQ for MCC completions */
1799 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1800 goto mcc_cq_free;
1801
1802 q = &adapter->mcc_obj.q;
1803 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1804 goto mcc_cq_destroy;
1805
1806 if (be_cmd_mccq_create(adapter, q, cq))
1807 goto mcc_q_free;
1808
1809 return 0;
1810
1811 mcc_q_free:
1812 be_queue_free(adapter, q);
1813 mcc_cq_destroy:
1814 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1815 mcc_cq_free:
1816 be_queue_free(adapter, cq);
1817 err:
1818 return -1;
1819 }
1820
1821 static void be_tx_queues_destroy(struct be_adapter *adapter)
1822 {
1823 struct be_queue_info *q;
1824 struct be_tx_obj *txo;
1825 u8 i;
1826
1827 for_all_tx_queues(adapter, txo, i) {
1828 q = &txo->q;
1829 if (q->created)
1830 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 be_queue_free(adapter, q);
1832
1833 q = &txo->cq;
1834 if (q->created)
1835 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 be_queue_free(adapter, q);
1837 }
1838 }
1839
1840 static int be_num_txqs_want(struct be_adapter *adapter)
1841 {
1842 if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 lancer_chip(adapter) || !be_physfn(adapter) ||
1844 adapter->generation == BE_GEN2)
1845 return 1;
1846 else
1847 return MAX_TX_QS;
1848 }
1849
1850 static int be_tx_cqs_create(struct be_adapter *adapter)
1851 {
1852 struct be_queue_info *cq, *eq;
1853 int status;
1854 struct be_tx_obj *txo;
1855 u8 i;
1856
1857 adapter->num_tx_qs = be_num_txqs_want(adapter);
1858 if (adapter->num_tx_qs != MAX_TX_QS) {
1859 rtnl_lock();
1860 netif_set_real_num_tx_queues(adapter->netdev,
1861 adapter->num_tx_qs);
1862 rtnl_unlock();
1863 }
1864
1865 for_all_tx_queues(adapter, txo, i) {
1866 cq = &txo->cq;
1867 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1868 sizeof(struct be_eth_tx_compl));
1869 if (status)
1870 return status;
1871
1872 /* If num_evt_qs is less than num_tx_qs, then more than
1873 * one txq shares an eq
1874 */
1875 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1876 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1877 if (status)
1878 return status;
1879 }
1880 return 0;
1881 }
1882
1883 static int be_tx_qs_create(struct be_adapter *adapter)
1884 {
1885 struct be_tx_obj *txo;
1886 int i, status;
1887
1888 for_all_tx_queues(adapter, txo, i) {
1889 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 sizeof(struct be_eth_wrb));
1891 if (status)
1892 return status;
1893
1894 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895 if (status)
1896 return status;
1897 }
1898
1899 return 0;
1900 }
1901
1902 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1903 {
1904 struct be_queue_info *q;
1905 struct be_rx_obj *rxo;
1906 int i;
1907
1908 for_all_rx_queues(adapter, rxo, i) {
1909 q = &rxo->cq;
1910 if (q->created)
1911 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912 be_queue_free(adapter, q);
1913 }
1914 }
1915
1916 static int be_rx_cqs_create(struct be_adapter *adapter)
1917 {
1918 struct be_queue_info *eq, *cq;
1919 struct be_rx_obj *rxo;
1920 int rc, i;
1921
1922 /* We'll create as many RSS rings as there are irqs.
1923 * But when there's only one irq there's no use creating RSS rings
1924 */
1925 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1926 num_irqs(adapter) + 1 : 1;
1927 if (adapter->num_rx_qs != MAX_RX_QS) {
1928 rtnl_lock();
1929 netif_set_real_num_rx_queues(adapter->netdev,
1930 adapter->num_rx_qs);
1931 rtnl_unlock();
1932 }
1933
1934 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1935 for_all_rx_queues(adapter, rxo, i) {
1936 rxo->adapter = adapter;
1937 cq = &rxo->cq;
1938 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1939 sizeof(struct be_eth_rx_compl));
1940 if (rc)
1941 return rc;
1942
1943 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1944 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1945 if (rc)
1946 return rc;
1947 }
1948
1949 if (adapter->num_rx_qs != MAX_RX_QS)
1950 dev_info(&adapter->pdev->dev,
1951 "Created only %d receive queues\n", adapter->num_rx_qs);
1952
1953 return 0;
1954 }
1955
1956 static irqreturn_t be_intx(int irq, void *dev)
1957 {
1958 struct be_adapter *adapter = dev;
1959 int num_evts;
1960
1961 /* With INTx only one EQ is used */
1962 num_evts = event_handle(&adapter->eq_obj[0]);
1963 if (num_evts)
1964 return IRQ_HANDLED;
1965 else
1966 return IRQ_NONE;
1967 }
1968
1969 static irqreturn_t be_msix(int irq, void *dev)
1970 {
1971 struct be_eq_obj *eqo = dev;
1972
1973 event_handle(eqo);
1974 return IRQ_HANDLED;
1975 }
1976
1977 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1978 {
1979 	return rxcp->tcpf && !rxcp->err;
1980 }
1981
1982 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1983 int budget)
1984 {
1985 struct be_adapter *adapter = rxo->adapter;
1986 struct be_queue_info *rx_cq = &rxo->cq;
1987 struct be_rx_compl_info *rxcp;
1988 u32 work_done;
1989
1990 for (work_done = 0; work_done < budget; work_done++) {
1991 rxcp = be_rx_compl_get(rxo);
1992 if (!rxcp)
1993 break;
1994
1995 /* Is it a flush compl that has no data */
1996 if (unlikely(rxcp->num_rcvd == 0))
1997 goto loop_continue;
1998
1999 /* Discard compl with partial DMA Lancer B0 */
2000 if (unlikely(!rxcp->pkt_size)) {
2001 be_rx_compl_discard(rxo, rxcp);
2002 goto loop_continue;
2003 }
2004
2005 /* On BE drop pkts that arrive due to imperfect filtering in
2006 * promiscuous mode on some SKUs
2007 */
2008 if (unlikely(rxcp->port != adapter->port_num &&
2009 !lancer_chip(adapter))) {
2010 be_rx_compl_discard(rxo, rxcp);
2011 goto loop_continue;
2012 }
2013
2014 if (do_gro(rxcp))
2015 be_rx_compl_process_gro(rxo, napi, rxcp);
2016 else
2017 be_rx_compl_process(rxo, rxcp);
2018 loop_continue:
2019 be_rx_stats_update(rxo, rxcp);
2020 }
2021
2022 if (work_done) {
2023 be_cq_notify(adapter, rx_cq->id, true, work_done);
2024
2025 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2026 be_post_rx_frags(rxo, GFP_ATOMIC);
2027 }
2028
2029 return work_done;
2030 }
2031
2032 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2033 int budget, int idx)
2034 {
2035 struct be_eth_tx_compl *txcp;
2036 int num_wrbs = 0, work_done;
2037
2038 for (work_done = 0; work_done < budget; work_done++) {
2039 txcp = be_tx_compl_get(&txo->cq);
2040 if (!txcp)
2041 break;
2042 num_wrbs += be_tx_compl_process(adapter, txo,
2043 AMAP_GET_BITS(struct amap_eth_tx_compl,
2044 wrb_index, txcp));
2045 }
2046
2047 if (work_done) {
2048 be_cq_notify(adapter, txo->cq.id, true, work_done);
2049 atomic_sub(num_wrbs, &txo->q.used);
2050
2051 /* As Tx wrbs have been freed up, wake up netdev queue
2052 * if it was stopped due to lack of tx wrbs. */
2053 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2054 atomic_read(&txo->q.used) < txo->q.len / 2) {
2055 netif_wake_subqueue(adapter->netdev, idx);
2056 }
2057
2058 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2059 tx_stats(txo)->tx_compl += work_done;
2060 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2061 }
2062 return (work_done < budget); /* Done */
2063 }
2064
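/* NAPI poll handler: drains the TX and RX completion queues mapped to
 * this EQ (and MCC completions on the EQ that owns the MCC queue); the
 * EQ is re-armed only when all work fits within the budget.
 */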
2065 int be_poll(struct napi_struct *napi, int budget)
2066 {
2067 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2068 struct be_adapter *adapter = eqo->adapter;
2069 int max_work = 0, work, i;
2070 bool tx_done;
2071
2072 /* Process all TXQs serviced by this EQ */
2073 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2074 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2075 eqo->tx_budget, i);
2076 if (!tx_done)
2077 max_work = budget;
2078 }
2079
2080 /* This loop will iterate twice for EQ0 in which
2081 * completions of the last RXQ (default one) are also processed.
2082 * For other EQs the loop iterates only once.
2083 */
2084 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2085 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2086 max_work = max(work, max_work);
2087 }
2088
2089 if (is_mcc_eqo(eqo))
2090 be_process_mcc(adapter);
2091
2092 if (max_work < budget) {
2093 napi_complete(napi);
2094 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2095 } else {
2096 /* As we'll continue in polling mode, count and clear events */
2097 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2098 }
2099 return max_work;
2100 }
2101
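/* Check for unrecoverable HW errors: SLIPORT status registers on Lancer,
 * unmasked UE status bits on BE2/BE3. On error, set adapter->hw_error and
 * log the details.
 */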
2102 void be_detect_error(struct be_adapter *adapter)
2103 {
2104 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2106 u32 i;
2107
2108 if (be_crit_error(adapter))
2109 return;
2110
2111 if (lancer_chip(adapter)) {
2112 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114 sliport_err1 = ioread32(adapter->db +
2115 SLIPORT_ERROR1_OFFSET);
2116 sliport_err2 = ioread32(adapter->db +
2117 SLIPORT_ERROR2_OFFSET);
2118 }
2119 } else {
2120 pci_read_config_dword(adapter->pdev,
2121 PCICFG_UE_STATUS_LOW, &ue_lo);
2122 pci_read_config_dword(adapter->pdev,
2123 PCICFG_UE_STATUS_HIGH, &ue_hi);
2124 pci_read_config_dword(adapter->pdev,
2125 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126 pci_read_config_dword(adapter->pdev,
2127 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2128
2129 ue_lo = (ue_lo & ~ue_lo_mask);
2130 ue_hi = (ue_hi & ~ue_hi_mask);
2131 }
2132
2133 if (ue_lo || ue_hi ||
2134 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2135 adapter->hw_error = true;
2136 dev_err(&adapter->pdev->dev,
2137 "Error detected in the card\n");
2138 }
2139
2140 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2141 dev_err(&adapter->pdev->dev,
2142 "ERR: sliport status 0x%x\n", sliport_status);
2143 dev_err(&adapter->pdev->dev,
2144 "ERR: sliport error1 0x%x\n", sliport_err1);
2145 dev_err(&adapter->pdev->dev,
2146 "ERR: sliport error2 0x%x\n", sliport_err2);
2147 }
2148
2149 if (ue_lo) {
2150 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2151 if (ue_lo & 1)
2152 dev_err(&adapter->pdev->dev,
2153 "UE: %s bit set\n", ue_status_low_desc[i]);
2154 }
2155 }
2156
2157 if (ue_hi) {
2158 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2159 if (ue_hi & 1)
2160 dev_err(&adapter->pdev->dev,
2161 "UE: %s bit set\n", ue_status_hi_desc[i]);
2162 }
2163 }
2164
2165 }
2166
2167 static void be_msix_disable(struct be_adapter *adapter)
2168 {
2169 if (msix_enabled(adapter)) {
2170 pci_disable_msix(adapter->pdev);
2171 adapter->num_msix_vec = 0;
2172 }
2173 }
2174
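/* RSS rings are used only on a PF with the RSS capability that is not
 * about to enable SR-IOV; the count is capped by the kernel's default
 * RSS queue count.
 */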
2175 static uint be_num_rss_want(struct be_adapter *adapter)
2176 {
2177 u32 num = 0;
2178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2179 !sriov_want(adapter) && be_physfn(adapter)) {
2180 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2181 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2182 }
2183 return num;
2184 }
2185
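/* Request one MSI-X vector per wanted RSS ring (capped at the number of
 * online CPUs), plus RoCE vectors when RoCE is supported. If the full set
 * cannot be granted, retry with the count pci_enable_msix() reports.
 */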
2186 static void be_msix_enable(struct be_adapter *adapter)
2187 {
2188 #define BE_MIN_MSIX_VECTORS 1
2189 int i, status, num_vec, num_roce_vec = 0;
2190
2191 /* If RSS queues are not used, need a vec for default RX Q */
2192 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2193 if (be_roce_supported(adapter)) {
2194 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2195 (num_online_cpus() + 1));
2196 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2197 num_vec += num_roce_vec;
2198 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2199 }
2200 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2201
2202 for (i = 0; i < num_vec; i++)
2203 adapter->msix_entries[i].entry = i;
2204
2205 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2206 if (status == 0) {
2207 goto done;
2208 } else if (status >= BE_MIN_MSIX_VECTORS) {
2209 num_vec = status;
2210 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2211 num_vec) == 0)
2212 goto done;
2213 }
2214 return;
2215 done:
2216 if (be_roce_supported(adapter)) {
2217 if (num_vec > num_roce_vec) {
2218 adapter->num_msix_vec = num_vec - num_roce_vec;
2219 adapter->num_msix_roce_vec =
2220 num_vec - adapter->num_msix_vec;
2221 } else {
2222 adapter->num_msix_vec = num_vec;
2223 adapter->num_msix_roce_vec = 0;
2224 }
2225 } else
2226 adapter->num_msix_vec = num_vec;
2227 return;
2228 }
2229
2230 static inline int be_msix_vec_get(struct be_adapter *adapter,
2231 struct be_eq_obj *eqo)
2232 {
2233 return adapter->msix_entries[eqo->idx].vector;
2234 }
2235
2236 static int be_msix_register(struct be_adapter *adapter)
2237 {
2238 struct net_device *netdev = adapter->netdev;
2239 struct be_eq_obj *eqo;
2240 int status, i, vec;
2241
2242 for_all_evt_queues(adapter, eqo, i) {
2243 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2244 vec = be_msix_vec_get(adapter, eqo);
2245 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2246 if (status)
2247 goto err_msix;
2248 }
2249
2250 return 0;
2251 err_msix:
2252 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2253 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2254 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2255 status);
2256 be_msix_disable(adapter);
2257 return status;
2258 }
2259
2260 static int be_irq_register(struct be_adapter *adapter)
2261 {
2262 struct net_device *netdev = adapter->netdev;
2263 int status;
2264
2265 if (msix_enabled(adapter)) {
2266 status = be_msix_register(adapter);
2267 if (status == 0)
2268 goto done;
2269 /* INTx is not supported for VF */
2270 if (!be_physfn(adapter))
2271 return status;
2272 }
2273
2274 /* INTx */
2275 netdev->irq = adapter->pdev->irq;
2276 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2277 adapter);
2278 if (status) {
2279 dev_err(&adapter->pdev->dev,
2280 "INTx request IRQ failed - err %d\n", status);
2281 return status;
2282 }
2283 done:
2284 adapter->isr_registered = true;
2285 return 0;
2286 }
2287
2288 static void be_irq_unregister(struct be_adapter *adapter)
2289 {
2290 struct net_device *netdev = adapter->netdev;
2291 struct be_eq_obj *eqo;
2292 int i;
2293
2294 if (!adapter->isr_registered)
2295 return;
2296
2297 /* INTx */
2298 if (!msix_enabled(adapter)) {
2299 free_irq(netdev->irq, adapter);
2300 goto done;
2301 }
2302
2303 /* MSIx */
2304 for_all_evt_queues(adapter, eqo, i)
2305 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2306
2307 done:
2308 adapter->isr_registered = false;
2309 }
2310
2311 static void be_rx_qs_destroy(struct be_adapter *adapter)
2312 {
2313 struct be_queue_info *q;
2314 struct be_rx_obj *rxo;
2315 int i;
2316
2317 for_all_rx_queues(adapter, rxo, i) {
2318 q = &rxo->q;
2319 if (q->created) {
2320 be_cmd_rxq_destroy(adapter, q);
2321 /* After the rxq is invalidated, wait for a grace time
2322 * of 1ms for all DMA to end and the flush compl to
2323 * arrive
2324 */
2325 mdelay(1);
2326 be_rx_cq_clean(rxo);
2327 }
2328 be_queue_free(adapter, q);
2329 }
2330 }
2331
2332 static int be_close(struct net_device *netdev)
2333 {
2334 struct be_adapter *adapter = netdev_priv(netdev);
2335 struct be_eq_obj *eqo;
2336 int i;
2337
2338 be_roce_dev_close(adapter);
2339
2340 be_async_mcc_disable(adapter);
2341
2342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, false);
2344
2345 for_all_evt_queues(adapter, eqo, i) {
2346 napi_disable(&eqo->napi);
2347 if (msix_enabled(adapter))
2348 synchronize_irq(be_msix_vec_get(adapter, eqo));
2349 else
2350 synchronize_irq(netdev->irq);
2351 be_eq_clean(eqo);
2352 }
2353
2354 be_irq_unregister(adapter);
2355
2356 /* Wait for all pending tx completions to arrive so that
2357 * all tx skbs are freed.
2358 */
2359 be_tx_compl_clean(adapter);
2360
2361 be_rx_qs_destroy(adapter);
2362 return 0;
2363 }
2364
2365 static int be_rx_qs_create(struct be_adapter *adapter)
2366 {
2367 struct be_rx_obj *rxo;
2368 int rc, i, j;
2369 u8 rsstable[128];
2370
2371 for_all_rx_queues(adapter, rxo, i) {
2372 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2373 sizeof(struct be_eth_rx_d));
2374 if (rc)
2375 return rc;
2376 }
2377
2378 /* The FW would like the default RXQ to be created first */
2379 rxo = default_rxo(adapter);
2380 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2381 adapter->if_handle, false, &rxo->rss_id);
2382 if (rc)
2383 return rc;
2384
2385 for_all_rss_queues(adapter, rxo, i) {
2386 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2387 rx_frag_size, adapter->if_handle,
2388 true, &rxo->rss_id);
2389 if (rc)
2390 return rc;
2391 }
2392
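/* Fill the 128-entry RSS indirection table round-robin with the RSS ring ids */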
2393 if (be_multi_rxq(adapter)) {
2394 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2395 for_all_rss_queues(adapter, rxo, i) {
2396 if ((j + i) >= 128)
2397 break;
2398 rsstable[j + i] = rxo->rss_id;
2399 }
2400 }
2401 rc = be_cmd_rss_config(adapter, rsstable, 128);
2402 if (rc)
2403 return rc;
2404 }
2405
2406 /* First time posting */
2407 for_all_rx_queues(adapter, rxo, i)
2408 be_post_rx_frags(rxo, GFP_KERNEL);
2409 return 0;
2410 }
2411
2412 static int be_open(struct net_device *netdev)
2413 {
2414 struct be_adapter *adapter = netdev_priv(netdev);
2415 struct be_eq_obj *eqo;
2416 struct be_rx_obj *rxo;
2417 struct be_tx_obj *txo;
2418 u8 link_status;
2419 int status, i;
2420
2421 status = be_rx_qs_create(adapter);
2422 if (status)
2423 goto err;
2424
2425 be_irq_register(adapter);
2426
2427 if (!lancer_chip(adapter))
2428 be_intr_set(adapter, true);
2429
2430 for_all_rx_queues(adapter, rxo, i)
2431 be_cq_notify(adapter, rxo->cq.id, true, 0);
2432
2433 for_all_tx_queues(adapter, txo, i)
2434 be_cq_notify(adapter, txo->cq.id, true, 0);
2435
2436 be_async_mcc_enable(adapter);
2437
2438 for_all_evt_queues(adapter, eqo, i) {
2439 napi_enable(&eqo->napi);
2440 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2441 }
2442
2443 status = be_cmd_link_status_query(adapter, NULL, NULL,
2444 &link_status, 0);
2445 if (!status)
2446 be_link_status_update(adapter, link_status);
2447
2448 be_roce_dev_open(adapter);
2449 return 0;
2450 err:
2451 be_close(adapter->netdev);
2452 return -EIO;
2453 }
2454
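/* Program the magic-packet WoL filter in FW (the netdev MAC when enabling,
 * a zeroed MAC when disabling) and update the PCI wake state to match.
 */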
2455 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2456 {
2457 struct be_dma_mem cmd;
2458 int status = 0;
2459 u8 mac[ETH_ALEN];
2460
2461 memset(mac, 0, ETH_ALEN);
2462
2463 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2464 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2465 GFP_KERNEL);
2466 if (cmd.va == NULL)
2467 return -1;
2468 memset(cmd.va, 0, cmd.size);
2469
2470 if (enable) {
2471 status = pci_write_config_dword(adapter->pdev,
2472 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2473 if (status) {
2474 dev_err(&adapter->pdev->dev,
2475 "Could not enable Wake-on-lan\n");
2476 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2477 cmd.dma);
2478 return status;
2479 }
2480 status = be_cmd_enable_magic_wol(adapter,
2481 adapter->netdev->dev_addr, &cmd);
2482 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2483 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2484 } else {
2485 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2486 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2487 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2488 }
2489
2490 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2491 return status;
2492 }
2493
2494 /*
2495 * Generate a seed MAC address from the PF MAC Address using jhash.
2496 * MAC addresses for VFs are assigned incrementally starting from the seed.
2497 * These addresses are programmed in the ASIC by the PF and the VF driver
2498 * queries for the MAC address during its probe.
2499 */
2500 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2501 {
2502 u32 vf;
2503 int status = 0;
2504 u8 mac[ETH_ALEN];
2505 struct be_vf_cfg *vf_cfg;
2506
2507 be_vf_eth_addr_generate(adapter, mac);
2508
2509 for_all_vfs(adapter, vf_cfg, vf) {
2510 if (lancer_chip(adapter)) {
2511 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2512 } else {
2513 status = be_cmd_pmac_add(adapter, mac,
2514 vf_cfg->if_handle,
2515 &vf_cfg->pmac_id, vf + 1);
2516 }
2517
2518 if (status)
2519 dev_err(&adapter->pdev->dev,
2520 "Mac address assignment failed for VF %d\n", vf);
2521 else
2522 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2523
2524 mac[5] += 1;
2525 }
2526 return status;
2527 }
2528
2529 static void be_vf_clear(struct be_adapter *adapter)
2530 {
2531 struct be_vf_cfg *vf_cfg;
2532 u32 vf;
2533
2534 if (be_find_vfs(adapter, ASSIGNED)) {
2535 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2536 goto done;
2537 }
2538
2539 for_all_vfs(adapter, vf_cfg, vf) {
2540 if (lancer_chip(adapter))
2541 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2542 else
2543 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2544 vf_cfg->pmac_id, vf + 1);
2545
2546 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2547 }
2548 pci_disable_sriov(adapter->pdev);
2549 done:
2550 kfree(adapter->vf_cfg);
2551 adapter->num_vfs = 0;
2552 }
2553
2554 static int be_clear(struct be_adapter *adapter)
2555 {
2556 int i = 1;
2557
2558 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2559 cancel_delayed_work_sync(&adapter->work);
2560 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2561 }
2562
2563 if (sriov_enabled(adapter))
2564 be_vf_clear(adapter);
2565
2566 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2567 be_cmd_pmac_del(adapter, adapter->if_handle,
2568 adapter->pmac_id[i], 0);
2569
2570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2571
2572 be_mcc_queues_destroy(adapter);
2573 be_rx_cqs_destroy(adapter);
2574 be_tx_queues_destroy(adapter);
2575 be_evt_queues_destroy(adapter);
2576
2577 be_msix_disable(adapter);
2578 return 0;
2579 }
2580
2581 static int be_vf_setup_init(struct be_adapter *adapter)
2582 {
2583 struct be_vf_cfg *vf_cfg;
2584 int vf;
2585
2586 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2587 GFP_KERNEL);
2588 if (!adapter->vf_cfg)
2589 return -ENOMEM;
2590
2591 for_all_vfs(adapter, vf_cfg, vf) {
2592 vf_cfg->if_handle = -1;
2593 vf_cfg->pmac_id = -1;
2594 }
2595 return 0;
2596 }
2597
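/* Enable SR-IOV (clamping num_vfs to what the device supports) and
 * provision each VF: create its interface, assign a MAC derived from the
 * PF's, and cache its link speed and default VLAN.
 */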
2598 static int be_vf_setup(struct be_adapter *adapter)
2599 {
2600 struct be_vf_cfg *vf_cfg;
2601 struct device *dev = &adapter->pdev->dev;
2602 u32 cap_flags, en_flags, vf;
2603 u16 def_vlan, lnk_speed;
2604 int status, enabled_vfs;
2605
2606 enabled_vfs = be_find_vfs(adapter, ENABLED);
2607 if (enabled_vfs) {
2608 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2609 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2610 return 0;
2611 }
2612
2613 if (num_vfs > adapter->dev_num_vfs) {
2614 dev_warn(dev, "Device supports %d VFs and not %d\n",
2615 adapter->dev_num_vfs, num_vfs);
2616 num_vfs = adapter->dev_num_vfs;
2617 }
2618
2619 status = pci_enable_sriov(adapter->pdev, num_vfs);
2620 if (!status) {
2621 adapter->num_vfs = num_vfs;
2622 } else {
2623 /* Platform doesn't support SRIOV though device supports it */
2624 dev_warn(dev, "SRIOV enable failed\n");
2625 return 0;
2626 }
2627
2628 status = be_vf_setup_init(adapter);
2629 if (status)
2630 goto err;
2631
2632 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2633 BE_IF_FLAGS_MULTICAST;
2634 for_all_vfs(adapter, vf_cfg, vf) {
2635 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2636 &vf_cfg->if_handle, vf + 1);
2637 if (status)
2638 goto err;
2639 }
2640
2641 if (!enabled_vfs) {
2642 status = be_vf_eth_addr_config(adapter);
2643 if (status)
2644 goto err;
2645 }
2646
2647 for_all_vfs(adapter, vf_cfg, vf) {
2648 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2649 NULL, vf + 1);
2650 if (status)
2651 goto err;
2652 vf_cfg->tx_rate = lnk_speed * 10;
2653
2654 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2655 vf + 1, vf_cfg->if_handle);
2656 if (status)
2657 goto err;
2658 vf_cfg->def_vid = def_vlan;
2659 }
2660 return 0;
2661 err:
2662 return status;
2663 }
2664
2665 static void be_setup_init(struct be_adapter *adapter)
2666 {
2667 adapter->vlan_prio_bmap = 0xff;
2668 adapter->phy.link_speed = -1;
2669 adapter->if_handle = -1;
2670 adapter->be3_native = false;
2671 adapter->promiscuous = false;
2672 adapter->eq_next_idx = 0;
2673 adapter->phy.forced_port_speed = -1;
2674 }
2675
2676 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2677 bool *active_mac, u32 *pmac_id)
2678 {
2679 int status = 0;
2680
2681 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2682 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2683 if (!lancer_chip(adapter) && !be_physfn(adapter))
2684 *active_mac = true;
2685 else
2686 *active_mac = false;
2687
2688 return status;
2689 }
2690
2691 if (lancer_chip(adapter)) {
2692 status = be_cmd_get_mac_from_list(adapter, mac,
2693 active_mac, pmac_id, 0);
2694 if (*active_mac) {
2695 status = be_cmd_mac_addr_query(adapter, mac,
2696 MAC_ADDRESS_TYPE_NETWORK,
2697 false, if_handle,
2698 *pmac_id);
2699 }
2700 } else if (be_physfn(adapter)) {
2701 /* For BE3, for PF get permanent MAC */
2702 status = be_cmd_mac_addr_query(adapter, mac,
2703 MAC_ADDRESS_TYPE_NETWORK, true,
2704 0, 0);
2705 *active_mac = false;
2706 } else {
2707 		/* For BE3, for VF get soft MAC assigned by PF */
2708 status = be_cmd_mac_addr_query(adapter, mac,
2709 MAC_ADDRESS_TYPE_NETWORK, false,
2710 if_handle, 0);
2711 *active_mac = true;
2712 }
2713 return status;
2714 }
2715
2716 /* Routine to query per function resource limits */
2717 static int be_get_config(struct be_adapter *adapter)
2718 {
2719 int pos;
2720 u16 dev_num_vfs;
2721
2722 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2723 if (pos) {
2724 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2725 &dev_num_vfs);
2726 if (!lancer_chip(adapter))
2727 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2728 adapter->dev_num_vfs = dev_num_vfs;
2729 }
2730 return 0;
2731 }
2732
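/* Bring up the function: enable MSI-X, create the event/completion/MCC
 * queues, create the interface and program its MAC, create the TX queues,
 * apply VLAN/RX-mode/flow-control settings and enable SR-IOV if requested.
 */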
2733 static int be_setup(struct be_adapter *adapter)
2734 {
2735 struct device *dev = &adapter->pdev->dev;
2736 u32 cap_flags, en_flags;
2737 u32 tx_fc, rx_fc;
2738 int status;
2739 u8 mac[ETH_ALEN];
2740 bool active_mac;
2741
2742 be_setup_init(adapter);
2743
2744 be_get_config(adapter);
2745
2746 be_cmd_req_native_mode(adapter);
2747
2748 be_msix_enable(adapter);
2749
2750 status = be_evt_queues_create(adapter);
2751 if (status)
2752 goto err;
2753
2754 status = be_tx_cqs_create(adapter);
2755 if (status)
2756 goto err;
2757
2758 status = be_rx_cqs_create(adapter);
2759 if (status)
2760 goto err;
2761
2762 status = be_mcc_queues_create(adapter);
2763 if (status)
2764 goto err;
2765
2766 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2767 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2768 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2769 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2770
2771 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2772 cap_flags |= BE_IF_FLAGS_RSS;
2773 en_flags |= BE_IF_FLAGS_RSS;
2774 }
2775
2776 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2777 en_flags = BE_IF_FLAGS_UNTAGGED |
2778 BE_IF_FLAGS_BROADCAST |
2779 BE_IF_FLAGS_MULTICAST;
2780 cap_flags = en_flags;
2781 }
2782
2783 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2784 &adapter->if_handle, 0);
2785 if (status != 0)
2786 goto err;
2787
2788 memset(mac, 0, ETH_ALEN);
2789 active_mac = false;
2790 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2791 &active_mac, &adapter->pmac_id[0]);
2792 if (status != 0)
2793 goto err;
2794
2795 if (!active_mac) {
2796 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2797 &adapter->pmac_id[0], 0);
2798 if (status != 0)
2799 goto err;
2800 }
2801
2802 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2803 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2804 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2805 }
2806
2807 status = be_tx_qs_create(adapter);
2808 if (status)
2809 goto err;
2810
2811 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2812
2813 if (adapter->vlans_added)
2814 be_vid_config(adapter);
2815
2816 be_set_rx_mode(adapter->netdev);
2817
2818 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2819
2820 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2821 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2822 adapter->rx_fc);
2823
2824 if (be_physfn(adapter) && num_vfs) {
2825 if (adapter->dev_num_vfs)
2826 be_vf_setup(adapter);
2827 else
2828 dev_warn(dev, "device doesn't support SRIOV\n");
2829 }
2830
2831 be_cmd_get_phy_info(adapter);
2832 if (be_pause_supported(adapter))
2833 adapter->phy.fc_autoneg = 1;
2834
2835 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2836 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2837 return 0;
2838 err:
2839 be_clear(adapter);
2840 return status;
2841 }
2842
2843 #ifdef CONFIG_NET_POLL_CONTROLLER
2844 static void be_netpoll(struct net_device *netdev)
2845 {
2846 struct be_adapter *adapter = netdev_priv(netdev);
2847 struct be_eq_obj *eqo;
2848 int i;
2849
2850 for_all_evt_queues(adapter, eqo, i)
2851 event_handle(eqo);
2852
2853 return;
2854 }
2855 #endif
2856
2857 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2858 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2859
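/* Returns true only when the CRC recorded in flash differs from the CRC at
 * the end of the new boot (redboot) image, i.e. when flashing it is needed.
 */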
2860 static bool be_flash_redboot(struct be_adapter *adapter,
2861 const u8 *p, u32 img_start, int image_size,
2862 int hdr_size)
2863 {
2864 u32 crc_offset;
2865 u8 flashed_crc[4];
2866 int status;
2867
2868 crc_offset = hdr_size + img_start + image_size - 4;
2869
2870 p += crc_offset;
2871
2872 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2873 (image_size - 4));
2874 if (status) {
2875 dev_err(&adapter->pdev->dev,
2876 "could not get crc from flash, not flashing redboot\n");
2877 return false;
2878 }
2879
2880 	/* update redboot only if crc does not match */
2881 if (!memcmp(flashed_crc, p, 4))
2882 return false;
2883 else
2884 return true;
2885 }
2886
2887 static bool phy_flashing_required(struct be_adapter *adapter)
2888 {
2889 return (adapter->phy.phy_type == TN_8022 &&
2890 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2891 }
2892
2893 static bool is_comp_in_ufi(struct be_adapter *adapter,
2894 struct flash_section_info *fsec, int type)
2895 {
2896 int i = 0, img_type = 0;
2897 struct flash_section_info_g2 *fsec_g2 = NULL;
2898
2899 if (adapter->generation != BE_GEN3)
2900 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2901
2902 for (i = 0; i < MAX_FLASH_COMP; i++) {
2903 if (fsec_g2)
2904 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2905 else
2906 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2907
2908 if (img_type == type)
2909 return true;
2910 }
2911 return false;
2912
2913 }
2914
2915 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2916 int header_size,
2917 const struct firmware *fw)
2918 {
2919 struct flash_section_info *fsec = NULL;
2920 const u8 *p = fw->data;
2921
2922 p += header_size;
2923 while (p < (fw->data + fw->size)) {
2924 fsec = (struct flash_section_info *)p;
2925 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2926 return fsec;
2927 p += 32;
2928 }
2929 return NULL;
2930 }
2931
2932 static int be_flash_data(struct be_adapter *adapter,
2933 const struct firmware *fw,
2934 struct be_dma_mem *flash_cmd,
2935 int num_of_images)
2937 {
2938 int status = 0, i, filehdr_size = 0;
2939 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2940 u32 total_bytes = 0, flash_op;
2941 int num_bytes;
2942 const u8 *p = fw->data;
2943 struct be_cmd_write_flashrom *req = flash_cmd->va;
2944 const struct flash_comp *pflashcomp;
2945 int num_comp, hdr_size;
2946 struct flash_section_info *fsec = NULL;
2947
2948 struct flash_comp gen3_flash_types[] = {
2949 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2950 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2951 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2952 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2953 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2954 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2955 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2956 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2957 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2958 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2959 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2960 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2961 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2962 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2963 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2964 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2965 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2966 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2967 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2968 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2969 };
2970
2971 struct flash_comp gen2_flash_types[] = {
2972 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2973 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2974 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2975 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2976 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2977 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2978 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2979 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2980 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2981 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2982 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2983 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2984 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2985 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2986 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2987 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2988 };
2989
2990 if (adapter->generation == BE_GEN3) {
2991 pflashcomp = gen3_flash_types;
2992 filehdr_size = sizeof(struct flash_file_hdr_g3);
2993 num_comp = ARRAY_SIZE(gen3_flash_types);
2994 } else {
2995 pflashcomp = gen2_flash_types;
2996 filehdr_size = sizeof(struct flash_file_hdr_g2);
2997 num_comp = ARRAY_SIZE(gen2_flash_types);
2998 }
2999 	/* Get flash section info */
3000 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3001 if (!fsec) {
3002 dev_err(&adapter->pdev->dev,
3003 "Invalid Cookie. UFI corrupted ?\n");
3004 return -1;
3005 }
3006 for (i = 0; i < num_comp; i++) {
3007 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3008 continue;
3009
3010 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3011 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3012 continue;
3013
3014 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
3015 if (!phy_flashing_required(adapter))
3016 continue;
3017 }
3018
3019 hdr_size = filehdr_size +
3020 (num_of_images * sizeof(struct image_hdr));
3021
3022 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3023 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3024 pflashcomp[i].size, hdr_size)))
3025 continue;
3026
3027 /* Flash the component */
3028 p = fw->data;
3029 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3030 if (p + pflashcomp[i].size > fw->data + fw->size)
3031 return -1;
3032 total_bytes = pflashcomp[i].size;
3033 while (total_bytes) {
3034 if (total_bytes > 32*1024)
3035 num_bytes = 32*1024;
3036 else
3037 num_bytes = total_bytes;
3038 total_bytes -= num_bytes;
3039 if (!total_bytes) {
3040 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3041 flash_op = FLASHROM_OPER_PHY_FLASH;
3042 else
3043 flash_op = FLASHROM_OPER_FLASH;
3044 } else {
3045 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3046 flash_op = FLASHROM_OPER_PHY_SAVE;
3047 else
3048 flash_op = FLASHROM_OPER_SAVE;
3049 }
3050 memcpy(req->params.data_buf, p, num_bytes);
3051 p += num_bytes;
3052 status = be_cmd_write_flashrom(adapter, flash_cmd,
3053 pflashcomp[i].optype, flash_op, num_bytes);
3054 if (status) {
3055 if ((status == ILLEGAL_IOCTL_REQ) &&
3056 (pflashcomp[i].optype ==
3057 OPTYPE_PHY_FW))
3058 break;
3059 dev_err(&adapter->pdev->dev,
3060 "cmd to write to flash rom failed.\n");
3061 return -1;
3062 }
3063 }
3064 }
3065 return 0;
3066 }
3067
3068 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3069 {
3070 if (fhdr == NULL)
3071 return 0;
3072 if (fhdr->build[0] == '3')
3073 return BE_GEN3;
3074 else if (fhdr->build[0] == '2')
3075 return BE_GEN2;
3076 else
3077 return 0;
3078 }
3079
3080 static int lancer_wait_idle(struct be_adapter *adapter)
3081 {
3082 #define SLIPORT_IDLE_TIMEOUT 30
3083 u32 reg_val;
3084 int status = 0, i;
3085
3086 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3087 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3088 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3089 break;
3090
3091 ssleep(1);
3092 }
3093
3094 if (i == SLIPORT_IDLE_TIMEOUT)
3095 status = -1;
3096
3097 return status;
3098 }
3099
3100 static int lancer_fw_reset(struct be_adapter *adapter)
3101 {
3102 int status = 0;
3103
3104 status = lancer_wait_idle(adapter);
3105 if (status)
3106 return status;
3107
3108 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3109 PHYSDEV_CONTROL_OFFSET);
3110
3111 return status;
3112 }
3113
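/* Lancer FW download: the image is written to the "/prg" object in 32KB
 * chunks, committed with a final zero-length write, and followed by a
 * SLIPORT function reset if FW reports that one is needed.
 */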
3114 static int lancer_fw_download(struct be_adapter *adapter,
3115 const struct firmware *fw)
3116 {
3117 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3118 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3119 struct be_dma_mem flash_cmd;
3120 const u8 *data_ptr = NULL;
3121 u8 *dest_image_ptr = NULL;
3122 size_t image_size = 0;
3123 u32 chunk_size = 0;
3124 u32 data_written = 0;
3125 u32 offset = 0;
3126 int status = 0;
3127 u8 add_status = 0;
3128 u8 change_status;
3129
3130 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3131 dev_err(&adapter->pdev->dev,
3132 "FW Image not properly aligned. "
3133 "Length must be 4 byte aligned.\n");
3134 status = -EINVAL;
3135 goto lancer_fw_exit;
3136 }
3137
3138 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3139 + LANCER_FW_DOWNLOAD_CHUNK;
3140 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3141 &flash_cmd.dma, GFP_KERNEL);
3142 if (!flash_cmd.va) {
3143 status = -ENOMEM;
3144 dev_err(&adapter->pdev->dev,
3145 "Memory allocation failure while flashing\n");
3146 goto lancer_fw_exit;
3147 }
3148
3149 dest_image_ptr = flash_cmd.va +
3150 sizeof(struct lancer_cmd_req_write_object);
3151 image_size = fw->size;
3152 data_ptr = fw->data;
3153
3154 while (image_size) {
3155 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3156
3157 /* Copy the image chunk content. */
3158 memcpy(dest_image_ptr, data_ptr, chunk_size);
3159
3160 status = lancer_cmd_write_object(adapter, &flash_cmd,
3161 chunk_size, offset,
3162 LANCER_FW_DOWNLOAD_LOCATION,
3163 &data_written, &change_status,
3164 &add_status);
3165 if (status)
3166 break;
3167
3168 offset += data_written;
3169 data_ptr += data_written;
3170 image_size -= data_written;
3171 }
3172
3173 if (!status) {
3174 /* Commit the FW written */
3175 status = lancer_cmd_write_object(adapter, &flash_cmd,
3176 0, offset,
3177 LANCER_FW_DOWNLOAD_LOCATION,
3178 &data_written, &change_status,
3179 &add_status);
3180 }
3181
3182 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3183 flash_cmd.dma);
3184 if (status) {
3185 dev_err(&adapter->pdev->dev,
3186 "Firmware load error. "
3187 "Status code: 0x%x Additional Status: 0x%x\n",
3188 status, add_status);
3189 goto lancer_fw_exit;
3190 }
3191
3192 if (change_status == LANCER_FW_RESET_NEEDED) {
3193 status = lancer_fw_reset(adapter);
3194 if (status) {
3195 dev_err(&adapter->pdev->dev,
3196 "Adapter busy for FW reset.\n"
3197 "New FW will not be active.\n");
3198 goto lancer_fw_exit;
3199 }
3200 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3201 dev_err(&adapter->pdev->dev,
3202 "System reboot required for new FW"
3203 " to be active\n");
3204 }
3205
3206 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3207 lancer_fw_exit:
3208 return status;
3209 }
3210
3211 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3212 {
3213 struct flash_file_hdr_g2 *fhdr;
3214 struct flash_file_hdr_g3 *fhdr3;
3215 struct image_hdr *img_hdr_ptr = NULL;
3216 struct be_dma_mem flash_cmd;
3217 const u8 *p;
3218 int status = 0, i = 0, num_imgs = 0;
3219
3220 p = fw->data;
3221 fhdr = (struct flash_file_hdr_g2 *) p;
3222
3223 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3224 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3225 &flash_cmd.dma, GFP_KERNEL);
3226 if (!flash_cmd.va) {
3227 status = -ENOMEM;
3228 dev_err(&adapter->pdev->dev,
3229 "Memory allocation failure while flashing\n");
3230 goto be_fw_exit;
3231 }
3232
3233 if ((adapter->generation == BE_GEN3) &&
3234 (get_ufigen_type(fhdr) == BE_GEN3)) {
3235 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3236 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3237 for (i = 0; i < num_imgs; i++) {
3238 img_hdr_ptr = (struct image_hdr *) (fw->data +
3239 (sizeof(struct flash_file_hdr_g3) +
3240 i * sizeof(struct image_hdr)));
3241 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3242 status = be_flash_data(adapter, fw, &flash_cmd,
3243 num_imgs);
3244 }
3245 } else if ((adapter->generation == BE_GEN2) &&
3246 (get_ufigen_type(fhdr) == BE_GEN2)) {
3247 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3248 } else {
3249 dev_err(&adapter->pdev->dev,
3250 "UFI and Interface are not compatible for flashing\n");
3251 status = -1;
3252 }
3253
3254 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3255 flash_cmd.dma);
3256 if (status) {
3257 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3258 goto be_fw_exit;
3259 }
3260
3261 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3262
3263 be_fw_exit:
3264 return status;
3265 }
3266
3267 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3268 {
3269 const struct firmware *fw;
3270 int status;
3271
3272 if (!netif_running(adapter->netdev)) {
3273 dev_err(&adapter->pdev->dev,
3274 "Firmware load not allowed (interface is down)\n");
3275 return -1;
3276 }
3277
3278 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3279 if (status)
3280 goto fw_exit;
3281
3282 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3283
3284 if (lancer_chip(adapter))
3285 status = lancer_fw_download(adapter, fw);
3286 else
3287 status = be_fw_download(adapter, fw);
3288
3289 fw_exit:
3290 release_firmware(fw);
3291 return status;
3292 }
3293
3294 static const struct net_device_ops be_netdev_ops = {
3295 .ndo_open = be_open,
3296 .ndo_stop = be_close,
3297 .ndo_start_xmit = be_xmit,
3298 .ndo_set_rx_mode = be_set_rx_mode,
3299 .ndo_set_mac_address = be_mac_addr_set,
3300 .ndo_change_mtu = be_change_mtu,
3301 .ndo_get_stats64 = be_get_stats64,
3302 .ndo_validate_addr = eth_validate_addr,
3303 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3304 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3305 .ndo_set_vf_mac = be_set_vf_mac,
3306 .ndo_set_vf_vlan = be_set_vf_vlan,
3307 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3308 .ndo_get_vf_config = be_get_vf_config,
3309 #ifdef CONFIG_NET_POLL_CONTROLLER
3310 .ndo_poll_controller = be_netpoll,
3311 #endif
3312 };
3313
3314 static void be_netdev_init(struct net_device *netdev)
3315 {
3316 struct be_adapter *adapter = netdev_priv(netdev);
3317 struct be_eq_obj *eqo;
3318 int i;
3319
3320 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3321 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3322 NETIF_F_HW_VLAN_TX;
3323 if (be_multi_rxq(adapter))
3324 netdev->hw_features |= NETIF_F_RXHASH;
3325
3326 netdev->features |= netdev->hw_features |
3327 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3328
3329 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3330 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3331
3332 netdev->priv_flags |= IFF_UNICAST_FLT;
3333
3334 netdev->flags |= IFF_MULTICAST;
3335
3336 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3337
3338 netdev->netdev_ops = &be_netdev_ops;
3339
3340 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3341
3342 for_all_evt_queues(adapter, eqo, i)
3343 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3344 }
3345
3346 static void be_unmap_pci_bars(struct be_adapter *adapter)
3347 {
3348 if (adapter->csr)
3349 iounmap(adapter->csr);
3350 if (adapter->db)
3351 iounmap(adapter->db);
3352 if (adapter->roce_db.base)
3353 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3354 }
3355
3356 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3357 {
3358 struct pci_dev *pdev = adapter->pdev;
3359 u8 __iomem *addr;
3360
3361 addr = pci_iomap(pdev, 2, 0);
3362 if (addr == NULL)
3363 return -ENOMEM;
3364
3365 adapter->roce_db.base = addr;
3366 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3367 adapter->roce_db.size = 8192;
3368 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3369 return 0;
3370 }
3371
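/* Map the PCI BARs used by this function: on Lancer, BAR0 holds the
 * doorbells and SLI type-3 functions also map a RoCE BAR; on BE2/BE3 the
 * PF maps the CSR BAR and every function maps its doorbell BAR.
 */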
3372 static int be_map_pci_bars(struct be_adapter *adapter)
3373 {
3374 u8 __iomem *addr;
3375 int db_reg;
3376
3377 if (lancer_chip(adapter)) {
3378 if (be_type_2_3(adapter)) {
3379 addr = ioremap_nocache(
3380 pci_resource_start(adapter->pdev, 0),
3381 pci_resource_len(adapter->pdev, 0));
3382 if (addr == NULL)
3383 return -ENOMEM;
3384 adapter->db = addr;
3385 }
3386 if (adapter->if_type == SLI_INTF_TYPE_3) {
3387 if (lancer_roce_map_pci_bars(adapter))
3388 goto pci_map_err;
3389 }
3390 return 0;
3391 }
3392
3393 if (be_physfn(adapter)) {
3394 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3395 pci_resource_len(adapter->pdev, 2));
3396 if (addr == NULL)
3397 return -ENOMEM;
3398 adapter->csr = addr;
3399 }
3400
3401 if (adapter->generation == BE_GEN2) {
3402 db_reg = 4;
3403 } else {
3404 if (be_physfn(adapter))
3405 db_reg = 4;
3406 else
3407 db_reg = 0;
3408 }
3409 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3410 pci_resource_len(adapter->pdev, db_reg));
3411 if (addr == NULL)
3412 goto pci_map_err;
3413 adapter->db = addr;
3414 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3415 adapter->roce_db.size = 4096;
3416 adapter->roce_db.io_addr =
3417 pci_resource_start(adapter->pdev, db_reg);
3418 adapter->roce_db.total_size =
3419 pci_resource_len(adapter->pdev, db_reg);
3420 }
3421 return 0;
3422 pci_map_err:
3423 be_unmap_pci_bars(adapter);
3424 return -ENOMEM;
3425 }
3426
3427 static void be_ctrl_cleanup(struct be_adapter *adapter)
3428 {
3429 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3430
3431 be_unmap_pci_bars(adapter);
3432
3433 if (mem->va)
3434 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3435 mem->dma);
3436
3437 mem = &adapter->rx_filter;
3438 if (mem->va)
3439 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3440 mem->dma);
3441 }
3442
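/* Map the BARs and allocate the DMA memory used for the (16-byte aligned)
 * mailbox and the RX filter command; also initialize the mbox and MCC locks.
 */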
3443 static int be_ctrl_init(struct be_adapter *adapter)
3444 {
3445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3447 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3448 int status;
3449
3450 status = be_map_pci_bars(adapter);
3451 if (status)
3452 goto done;
3453
3454 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3455 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3456 mbox_mem_alloc->size,
3457 &mbox_mem_alloc->dma,
3458 GFP_KERNEL);
3459 if (!mbox_mem_alloc->va) {
3460 status = -ENOMEM;
3461 goto unmap_pci_bars;
3462 }
3463 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3464 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3465 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3466 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3467
3468 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3469 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3470 &rx_filter->dma, GFP_KERNEL);
3471 if (rx_filter->va == NULL) {
3472 status = -ENOMEM;
3473 goto free_mbox;
3474 }
3475 memset(rx_filter->va, 0, rx_filter->size);
3476
3477 mutex_init(&adapter->mbox_lock);
3478 spin_lock_init(&adapter->mcc_lock);
3479 spin_lock_init(&adapter->mcc_cq_lock);
3480
3481 init_completion(&adapter->flash_compl);
3482 pci_save_state(adapter->pdev);
3483 return 0;
3484
3485 free_mbox:
3486 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3487 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3488
3489 unmap_pci_bars:
3490 be_unmap_pci_bars(adapter);
3491
3492 done:
3493 return status;
3494 }
3495
3496 static void be_stats_cleanup(struct be_adapter *adapter)
3497 {
3498 struct be_dma_mem *cmd = &adapter->stats_cmd;
3499
3500 if (cmd->va)
3501 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3502 cmd->va, cmd->dma);
3503 }
3504
3505 static int be_stats_init(struct be_adapter *adapter)
3506 {
3507 struct be_dma_mem *cmd = &adapter->stats_cmd;
3508
3509 if (adapter->generation == BE_GEN2) {
3510 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3511 } else {
3512 if (lancer_chip(adapter))
3513 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3514 else
3515 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3516 }
3517 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3518 GFP_KERNEL);
3519 if (cmd->va == NULL)
3520 return -1;
3521 memset(cmd->va, 0, cmd->size);
3522 return 0;
3523 }
3524
3525 static void __devexit be_remove(struct pci_dev *pdev)
3526 {
3527 struct be_adapter *adapter = pci_get_drvdata(pdev);
3528
3529 if (!adapter)
3530 return;
3531
3532 be_roce_dev_remove(adapter);
3533
3534 cancel_delayed_work_sync(&adapter->func_recovery_work);
3535
3536 unregister_netdev(adapter->netdev);
3537
3538 be_clear(adapter);
3539
3540 /* tell fw we're done with firing cmds */
3541 be_cmd_fw_clean(adapter);
3542
3543 be_stats_cleanup(adapter);
3544
3545 be_ctrl_cleanup(adapter);
3546
3547 pci_set_drvdata(pdev, NULL);
3548 pci_release_regions(pdev);
3549 pci_disable_device(pdev);
3550
3551 free_netdev(adapter->netdev);
3552 }
3553
3554 bool be_is_wol_supported(struct be_adapter *adapter)
3555 {
3556 	return (adapter->wol_cap & BE_WOL_CAP) &&
3557 		!be_is_wol_excluded(adapter);
3558 }
3559
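/* Query the FW's extended FAT capabilities and return the debug level
 * currently configured for UART tracing on module 0.
 */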
3560 u32 be_get_fw_log_level(struct be_adapter *adapter)
3561 {
3562 struct be_dma_mem extfat_cmd;
3563 struct be_fat_conf_params *cfgs;
3564 int status;
3565 u32 level = 0;
3566 int j;
3567
3568 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3569 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3570 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3571 &extfat_cmd.dma);
3572
3573 if (!extfat_cmd.va) {
3574 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3575 __func__);
3576 goto err;
3577 }
3578
3579 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3580 if (!status) {
3581 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3582 sizeof(struct be_cmd_resp_hdr));
3583 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3584 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3585 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3586 }
3587 }
3588 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3589 extfat_cmd.dma);
3590 err:
3591 return level;
3592 }

3593 static int be_get_initial_config(struct be_adapter *adapter)
3594 {
3595 int status;
3596 u32 level;
3597
3598 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3599 &adapter->function_mode, &adapter->function_caps);
3600 if (status)
3601 return status;
3602
3603 if (adapter->function_mode & FLEX10_MODE)
3604 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3605 else
3606 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3607
3608 if (be_physfn(adapter))
3609 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3610 else
3611 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3612
3613 /* primary mac needs 1 pmac entry */
3614 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3615 sizeof(u32), GFP_KERNEL);
3616 if (!adapter->pmac_id)
3617 return -ENOMEM;
3618
3619 status = be_cmd_get_cntl_attributes(adapter);
3620 if (status)
3621 return status;
3622
3623 status = be_cmd_get_acpi_wol_cap(adapter);
3624 if (status) {
3625 		/* in case of a failure to get WOL capabilities
3626 * check the exclusion list to determine WOL capability */
3627 if (!be_is_wol_excluded(adapter))
3628 adapter->wol_cap |= BE_WOL_CAP;
3629 }
3630
3631 if (be_is_wol_supported(adapter))
3632 adapter->wol = true;
3633
3634 /* Must be a power of 2 or else MODULO will BUG_ON */
3635 adapter->be_get_temp_freq = 64;
3636
3637 level = be_get_fw_log_level(adapter);
3638 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3639
3640 return 0;
3641 }
3642
3643 static int be_dev_type_check(struct be_adapter *adapter)
3644 {
3645 struct pci_dev *pdev = adapter->pdev;
3646 	u32 sli_intf = 0;
3647
3648 switch (pdev->device) {
3649 case BE_DEVICE_ID1:
3650 case OC_DEVICE_ID1:
3651 adapter->generation = BE_GEN2;
3652 break;
3653 case BE_DEVICE_ID2:
3654 case OC_DEVICE_ID2:
3655 adapter->generation = BE_GEN3;
3656 break;
3657 case OC_DEVICE_ID3:
3658 case OC_DEVICE_ID4:
3659 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3660 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3661 SLI_INTF_IF_TYPE_SHIFT;
3664 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3665 !be_type_2_3(adapter)) {
3666 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3667 return -EINVAL;
3668 }
3669 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3670 SLI_INTF_FAMILY_SHIFT);
3671 adapter->generation = BE_GEN3;
3672 break;
3673 case OC_DEVICE_ID5:
3674 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3675 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3676 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3677 return -EINVAL;
3678 }
3679 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3680 SLI_INTF_FAMILY_SHIFT);
3681 adapter->generation = BE_GEN3;
3682 break;
3683 default:
3684 adapter->generation = 0;
3685 }
3686
3687 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3688 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3689 return 0;
3690 }
3691
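/* Recover a Lancer function after a firmware error: wait for the SLIPORT
 * to become ready again, then tear the interface down and set it up afresh.
 */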
3692 static int lancer_recover_func(struct be_adapter *adapter)
3693 {
3694 int status;
3695
3696 status = lancer_test_and_set_rdy_state(adapter);
3697 if (status)
3698 goto err;
3699
3700 if (netif_running(adapter->netdev))
3701 be_close(adapter->netdev);
3702
3703 be_clear(adapter);
3704
3705 adapter->hw_error = false;
3706 adapter->fw_timeout = false;
3707
3708 status = be_setup(adapter);
3709 if (status)
3710 goto err;
3711
3712 if (netif_running(adapter->netdev)) {
3713 status = be_open(adapter->netdev);
3714 if (status)
3715 goto err;
3716 }
3717
3718 dev_err(&adapter->pdev->dev,
3719 "Adapter SLIPORT recovery succeeded\n");
3720 return 0;
3721 err:
3722 dev_err(&adapter->pdev->dev,
3723 "Adapter SLIPORT recovery failed\n");
3724
3725 return status;
3726 }
3727
3728 static void be_func_recovery_task(struct work_struct *work)
3729 {
3730 struct be_adapter *adapter =
3731 container_of(work, struct be_adapter, func_recovery_work.work);
3732 int status;
3733
3734 be_detect_error(adapter);
3735
3736 if (adapter->hw_error && lancer_chip(adapter)) {
3737
3738 if (adapter->eeh_error)
3739 goto out;
3740
3741 rtnl_lock();
3742 netif_device_detach(adapter->netdev);
3743 rtnl_unlock();
3744
3745 status = lancer_recover_func(adapter);
3746
3747 if (!status)
3748 netif_device_attach(adapter->netdev);
3749 }
3750
3751 out:
3752 schedule_delayed_work(&adapter->func_recovery_work,
3753 msecs_to_jiffies(1000));
3754 }
3755
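/* Periodic (1 second) housekeeping: issue FW stats queries, periodically
 * read the die temperature, replenish RX rings that ran dry and update the
 * EQ delay (interrupt moderation) on every event queue.
 */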
3756 static void be_worker(struct work_struct *work)
3757 {
3758 struct be_adapter *adapter =
3759 container_of(work, struct be_adapter, work.work);
3760 struct be_rx_obj *rxo;
3761 struct be_eq_obj *eqo;
3762 int i;
3763
3764 /* when interrupts are not yet enabled, just reap any pending
3765 * mcc completions */
3766 if (!netif_running(adapter->netdev)) {
3767 be_process_mcc(adapter);
3768 goto reschedule;
3769 }
3770
3771 if (!adapter->stats_cmd_sent) {
3772 if (lancer_chip(adapter))
3773 lancer_cmd_get_pport_stats(adapter,
3774 &adapter->stats_cmd);
3775 else
3776 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3777 }
3778
3779 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3780 be_cmd_get_die_temperature(adapter);
3781
3782 for_all_rx_queues(adapter, rxo, i) {
3783 if (rxo->rx_post_starved) {
3784 rxo->rx_post_starved = false;
3785 be_post_rx_frags(rxo, GFP_KERNEL);
3786 }
3787 }
3788
3789 for_all_evt_queues(adapter, eqo, i)
3790 be_eqd_update(adapter, eqo);
3791
3792 reschedule:
3793 adapter->work_counter++;
3794 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3795 }
3796
3797 static bool be_reset_required(struct be_adapter *adapter)
3798 {
3799 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3800 }
3801
3802 static int __devinit be_probe(struct pci_dev *pdev,
3803 const struct pci_device_id *pdev_id)
3804 {
3805 int status = 0;
3806 struct be_adapter *adapter;
3807 struct net_device *netdev;
3808 char port_name;
3809
3810 status = pci_enable_device(pdev);
3811 if (status)
3812 goto do_none;
3813
3814 status = pci_request_regions(pdev, DRV_NAME);
3815 if (status)
3816 goto disable_dev;
3817 pci_set_master(pdev);
3818
3819 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3820 if (netdev == NULL) {
3821 status = -ENOMEM;
3822 goto rel_reg;
3823 }
3824 adapter = netdev_priv(netdev);
3825 adapter->pdev = pdev;
3826 pci_set_drvdata(pdev, adapter);
3827
3828 status = be_dev_type_check(adapter);
3829 if (status)
3830 goto free_netdev;
3831
3832 adapter->netdev = netdev;
3833 SET_NETDEV_DEV(netdev, &pdev->dev);
3834
3835 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3836 if (!status) {
3837 netdev->features |= NETIF_F_HIGHDMA;
3838 } else {
3839 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3840 if (status) {
3841 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3842 goto free_netdev;
3843 }
3844 }
3845
3846 status = be_ctrl_init(adapter);
3847 if (status)
3848 goto free_netdev;
3849
3850 /* sync up with fw's ready state */
3851 if (be_physfn(adapter)) {
3852 status = be_fw_wait_ready(adapter);
3853 if (status)
3854 goto ctrl_clean;
3855 }
3856
3857 /* tell fw we're ready to fire cmds */
3858 status = be_cmd_fw_init(adapter);
3859 if (status)
3860 goto ctrl_clean;
3861
3862 if (be_reset_required(adapter)) {
3863 status = be_cmd_reset_function(adapter);
3864 if (status)
3865 goto ctrl_clean;
3866 }
3867
3868 /* The INTR bit may be set in the card when probed by a kdump kernel
3869 * after a crash.
3870 */
3871 if (!lancer_chip(adapter))
3872 be_intr_set(adapter, false);
3873
3874 status = be_stats_init(adapter);
3875 if (status)
3876 goto ctrl_clean;
3877
3878 status = be_get_initial_config(adapter);
3879 if (status)
3880 goto stats_clean;
3881
3882 INIT_DELAYED_WORK(&adapter->work, be_worker);
3883 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3884 adapter->rx_fc = adapter->tx_fc = true;
3885
3886 status = be_setup(adapter);
3887 if (status)
3888 goto msix_disable;
3889
3890 be_netdev_init(netdev);
3891 status = register_netdev(netdev);
3892 if (status != 0)
3893 goto unsetup;
3894
3895 be_roce_dev_add(adapter);
3896
3897 schedule_delayed_work(&adapter->func_recovery_work,
3898 msecs_to_jiffies(1000));
3899
3900 be_cmd_query_port_name(adapter, &port_name);
3901
3902 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
3903 port_name);
3904
3905 return 0;
3906
3907 unsetup:
3908 be_clear(adapter);
3909 msix_disable:
3910 be_msix_disable(adapter);
3911 stats_clean:
3912 be_stats_cleanup(adapter);
3913 ctrl_clean:
3914 be_ctrl_cleanup(adapter);
3915 free_netdev:
3916 free_netdev(netdev);
3917 pci_set_drvdata(pdev, NULL);
3918 rel_reg:
3919 pci_release_regions(pdev);
3920 disable_dev:
3921 pci_disable_device(pdev);
3922 do_none:
3923 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3924 return status;
3925 }
3926
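/* Legacy PM suspend: optionally arm wake-on-lan, stop the recovery worker,
 * detach and close the interface, tear down HW resources and move the PCI
 * device into the requested low-power state.
 */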
3927 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3928 {
3929 struct be_adapter *adapter = pci_get_drvdata(pdev);
3930 struct net_device *netdev = adapter->netdev;
3931
3932 if (adapter->wol)
3933 be_setup_wol(adapter, true);
3934
3935 cancel_delayed_work_sync(&adapter->func_recovery_work);
3936
3937 netif_device_detach(netdev);
3938 if (netif_running(netdev)) {
3939 rtnl_lock();
3940 be_close(netdev);
3941 rtnl_unlock();
3942 }
3943 be_clear(adapter);
3944
3945 pci_save_state(pdev);
3946 pci_disable_device(pdev);
3947 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3948 return 0;
3949 }
3950
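/* Legacy PM resume: re-enable the PCI device, re-initialise FW and HW state
 * with be_setup(), reopen the interface if it was running and restart the
 * recovery worker.
 */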
3951 static int be_resume(struct pci_dev *pdev)
3952 {
3953 int status = 0;
3954 struct be_adapter *adapter = pci_get_drvdata(pdev);
3955 struct net_device *netdev = adapter->netdev;
3956
3957 netif_device_detach(netdev);
3958
3959 status = pci_enable_device(pdev);
3960 if (status)
3961 return status;
3962
3963 pci_set_power_state(pdev, 0);
3964 pci_restore_state(pdev);
3965
3966 /* tell fw we're ready to fire cmds */
3967 status = be_cmd_fw_init(adapter);
3968 if (status)
3969 return status;
3970
3971 be_setup(adapter);
3972 if (netif_running(netdev)) {
3973 rtnl_lock();
3974 be_open(netdev);
3975 rtnl_unlock();
3976 }
3977
3978 schedule_delayed_work(&adapter->func_recovery_work,
3979 msecs_to_jiffies(1000));
3980 netif_device_attach(netdev);
3981
3982 if (adapter->wol)
3983 be_setup_wol(adapter, false);
3984
3985 return 0;
3986 }
3987
3988 /* Resetting the function (FLR) in the shutdown path stops BE from
3989 * DMAing any data before the device is disabled.
3990 */
3991 static void be_shutdown(struct pci_dev *pdev)
3992 {
3993 struct be_adapter *adapter = pci_get_drvdata(pdev);
3994
3995 if (!adapter)
3996 return;
3997
3998 cancel_delayed_work_sync(&adapter->work);
3999 cancel_delayed_work_sync(&adapter->func_recovery_work);
4000
4001 netif_device_detach(adapter->netdev);
4002
4003 if (adapter->wol)
4004 be_setup_wol(adapter, true);
4005
4006 be_cmd_reset_function(adapter);
4007
4008 pci_disable_device(pdev);
4009 }
4010
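/* EEH/AER: a PCI channel error was detected. Detach the interface, tear
 * down HW resources and ask the PCI core for a slot reset (or report a
 * disconnect on a permanent failure).
 */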
4011 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4012 pci_channel_state_t state)
4013 {
4014 struct be_adapter *adapter = pci_get_drvdata(pdev);
4015 struct net_device *netdev = adapter->netdev;
4016
4017 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4018
4019 adapter->eeh_error = true;
4020
4021 cancel_delayed_work_sync(&adapter->func_recovery_work);
4022
4023 rtnl_lock();
4024 netif_device_detach(netdev);
4025 rtnl_unlock();
4026
4027 if (netif_running(netdev)) {
4028 rtnl_lock();
4029 be_close(netdev);
4030 rtnl_unlock();
4031 }
4032 be_clear(adapter);
4033
4034 if (state == pci_channel_io_perm_failure)
4035 return PCI_ERS_RESULT_DISCONNECT;
4036
4037 pci_disable_device(pdev);
4038
4039 /* The error could cause the FW to trigger a flash debug dump.
4040 * Resetting the card while the flash dump is in progress
4041 * can cause it not to recover; wait for it to finish.
4042 */
4043 ssleep(30);
4044 return PCI_ERS_RESULT_NEED_RESET;
4045 }
4046
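/* EEH/AER slot reset: re-enable and restore the PCI device, then wait for
 * the FW to become ready before reporting the slot as recovered.
 */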
4047 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4048 {
4049 struct be_adapter *adapter = pci_get_drvdata(pdev);
4050 int status;
4051
4052 dev_info(&adapter->pdev->dev, "EEH reset\n");
4053 be_clear_all_error(adapter);
4054
4055 status = pci_enable_device(pdev);
4056 if (status)
4057 return PCI_ERS_RESULT_DISCONNECT;
4058
4059 pci_set_master(pdev);
4060 pci_set_power_state(pdev, 0);
4061 pci_restore_state(pdev);
4062
4063 /* Check if card is ok and fw is ready */
4064 status = be_fw_wait_ready(adapter);
4065 if (status)
4066 return PCI_ERS_RESULT_DISCONNECT;
4067
4068 return PCI_ERS_RESULT_RECOVERED;
4069 }
4070
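/* EEH/AER resume: re-initialise FW, reset the function, run be_setup()
 * again, reopen the interface if it was running and restart the recovery
 * worker.
 */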
4071 static void be_eeh_resume(struct pci_dev *pdev)
4072 {
4073 int status = 0;
4074 struct be_adapter *adapter = pci_get_drvdata(pdev);
4075 struct net_device *netdev = adapter->netdev;
4076
4077 dev_info(&adapter->pdev->dev, "EEH resume\n");
4078
4079 pci_save_state(pdev);
4080
4081 /* tell fw we're ready to fire cmds */
4082 status = be_cmd_fw_init(adapter);
4083 if (status)
4084 goto err;
4085
4086 status = be_cmd_reset_function(adapter);
4087 if (status)
4088 goto err;
4089
4090 status = be_setup(adapter);
4091 if (status)
4092 goto err;
4093
4094 if (netif_running(netdev)) {
4095 status = be_open(netdev);
4096 if (status)
4097 goto err;
4098 }
4099
4100 schedule_delayed_work(&adapter->func_recovery_work,
4101 msecs_to_jiffies(1000));
4102 netif_device_attach(netdev);
4103 return;
4104 err:
4105 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4106 }
4107
4108 static struct pci_error_handlers be_eeh_handlers = {
4109 .error_detected = be_eeh_err_detected,
4110 .slot_reset = be_eeh_reset,
4111 .resume = be_eeh_resume,
4112 };
4113
4114 static struct pci_driver be_driver = {
4115 .name = DRV_NAME,
4116 .id_table = be_dev_ids,
4117 .probe = be_probe,
4118 .remove = be_remove,
4119 .suspend = be_suspend,
4120 .resume = be_resume,
4121 .shutdown = be_shutdown,
4122 .err_handler = &be_eeh_handlers
4123 };
4124
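/* Module init: only rx_frag_size values of 2048, 4096 and 8192 are
 * supported; anything else falls back to 2048 before the PCI driver is
 * registered.
 */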
4125 static int __init be_init_module(void)
4126 {
4127 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4128 rx_frag_size != 2048) {
4129 printk(KERN_WARNING DRV_NAME
4130 " : Module param rx_frag_size must be 2048/4096/8192."
4131 " Using 2048\n");
4132 rx_frag_size = 2048;
4133 }
4134
4135 return pci_register_driver(&be_driver);
4136 }
4137 module_init(be_init_module);
4138
4139 static void __exit be_exit_module(void)
4140 {
4141 pci_unregister_driver(&be_driver);
4142 }
4143 module_exit(be_exit_module);