1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("ServerEngines Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { 0 }
48 };
49 MODULE_DEVICE_TABLE(pci, be_dev_ids);
50 /* UE Status Low CSR */
51 static const char * const ue_status_low_desc[] = {
52 "CEV",
53 "CTX",
54 "DBUF",
55 "ERX",
56 "Host",
57 "MPU",
58 "NDMA",
59 "PTC ",
60 "RDMA ",
61 "RXF ",
62 "RXIPS ",
63 "RXULP0 ",
64 "RXULP1 ",
65 "RXULP2 ",
66 "TIM ",
67 "TPOST ",
68 "TPRE ",
69 "TXIPS ",
70 "TXULP0 ",
71 "TXULP1 ",
72 "UC ",
73 "WDMA ",
74 "TXULP2 ",
75 "HOST1 ",
76 "P0_OB_LINK ",
77 "P1_OB_LINK ",
78 "HOST_GPIO ",
79 "MBOX ",
80 "AXGMAC0",
81 "AXGMAC1",
82 "JTAG",
83 "MPU_INTPEND"
84 };
85 /* UE Status High CSR */
86 static const char * const ue_status_hi_desc[] = {
87 "LPCMEMHOST",
88 "MGMT_MAC",
89 "PCS0ONLINE",
90 "MPU_IRAM",
91 "PCS1ONLINE",
92 "PCTL0",
93 "PCTL1",
94 "PMEM",
95 "RR",
96 "TXPB",
97 "RXPP",
98 "XAUI",
99 "TXP",
100 "ARM",
101 "IPC",
102 "HOST2",
103 "HOST3",
104 "HOST4",
105 "HOST5",
106 "HOST6",
107 "HOST7",
108 "HOST8",
109 "HOST9",
110 "NETC",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown"
119 };
120
121 /* Is BE in a multi-channel mode */
122 static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126 }
127
128 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129 {
130 struct be_dma_mem *mem = &q->dma_mem;
131 if (mem->va) {
132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
134 mem->va = NULL;
135 }
136 }
137
138 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140 {
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
149 if (!mem->va)
150 return -ENOMEM;
151 memset(mem->va, 0, mem->size);
152 return 0;
153 }
154
155 static void be_intr_set(struct be_adapter *adapter, bool enable)
156 {
157 u32 reg, enabled;
158
159 if (adapter->eeh_error)
160 return;
161
162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
166 if (!enabled && enable)
167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
168 else if (enabled && !enable)
169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
170 else
171 return;
172
173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
175 }
176
177 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
178 {
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
182
183 wmb();
184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
185 }
186
187 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
188 {
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
192
193 wmb();
194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
195 }
196
197 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
198 bool arm, bool clear_int, u16 num_popped)
199 {
200 u32 val = 0;
201 val |= qid & DB_EQ_RING_ID_MASK;
202 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
203 DB_EQ_RING_ID_EXT_MASK_SHIFT);
204
205 if (adapter->eeh_error)
206 return;
207
208 if (arm)
209 val |= 1 << DB_EQ_REARM_SHIFT;
210 if (clear_int)
211 val |= 1 << DB_EQ_CLR_SHIFT;
212 val |= 1 << DB_EQ_EVNT_SHIFT;
213 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
214 iowrite32(val, adapter->db + DB_EQ_OFFSET);
215 }
216
217 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
218 {
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
223
224 if (adapter->eeh_error)
225 return;
226
227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
231 }
232
233 static int be_mac_addr_set(struct net_device *netdev, void *p)
234 {
235 struct be_adapter *adapter = netdev_priv(netdev);
236 struct sockaddr *addr = p;
237 int status = 0;
238 u8 current_mac[ETH_ALEN];
239 u32 pmac_id = adapter->pmac_id[0];
240
241 if (!is_valid_ether_addr(addr->sa_data))
242 return -EADDRNOTAVAIL;
243
244 status = be_cmd_mac_addr_query(adapter, current_mac,
245 MAC_ADDRESS_TYPE_NETWORK, false,
246 adapter->if_handle, 0);
247 if (status)
248 goto err;
249
250 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
251 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
252 adapter->if_handle, &adapter->pmac_id[0], 0);
253 if (status)
254 goto err;
255
256 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
257 }
258 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
259 return 0;
260 err:
261 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
262 return status;
263 }
264
265 static void populate_be2_stats(struct be_adapter *adapter)
266 {
267 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
268 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
269 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
270 struct be_port_rxf_stats_v0 *port_stats =
271 &rxf_stats->port[adapter->port_num];
272 struct be_drv_stats *drvs = &adapter->drv_stats;
273
274 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
275 drvs->rx_pause_frames = port_stats->rx_pause_frames;
276 drvs->rx_crc_errors = port_stats->rx_crc_errors;
277 drvs->rx_control_frames = port_stats->rx_control_frames;
278 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
279 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
280 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
281 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
282 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
283 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
284 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
285 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
286 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
287 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
288 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
289 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
290 drvs->rx_dropped_header_too_small =
291 port_stats->rx_dropped_header_too_small;
292 drvs->rx_address_mismatch_drops =
293 port_stats->rx_address_mismatch_drops +
294 port_stats->rx_vlan_mismatch_drops;
295 drvs->rx_alignment_symbol_errors =
296 port_stats->rx_alignment_symbol_errors;
297
298 drvs->tx_pauseframes = port_stats->tx_pauseframes;
299 drvs->tx_controlframes = port_stats->tx_controlframes;
300
301 if (adapter->port_num)
302 drvs->jabber_events = rxf_stats->port1_jabber_events;
303 else
304 drvs->jabber_events = rxf_stats->port0_jabber_events;
305 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
306 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
307 drvs->forwarded_packets = rxf_stats->forwarded_packets;
308 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
309 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
310 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
311 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
312 }
313
314 static void populate_be3_stats(struct be_adapter *adapter)
315 {
316 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
317 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
318 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
319 struct be_port_rxf_stats_v1 *port_stats =
320 &rxf_stats->port[adapter->port_num];
321 struct be_drv_stats *drvs = &adapter->drv_stats;
322
323 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
324 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
325 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
326 drvs->rx_pause_frames = port_stats->rx_pause_frames;
327 drvs->rx_crc_errors = port_stats->rx_crc_errors;
328 drvs->rx_control_frames = port_stats->rx_control_frames;
329 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
336 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
337 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
338 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
339 drvs->rx_dropped_header_too_small =
340 port_stats->rx_dropped_header_too_small;
341 drvs->rx_input_fifo_overflow_drop =
342 port_stats->rx_input_fifo_overflow_drop;
343 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
344 drvs->rx_alignment_symbol_errors =
345 port_stats->rx_alignment_symbol_errors;
346 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
347 drvs->tx_pauseframes = port_stats->tx_pauseframes;
348 drvs->tx_controlframes = port_stats->tx_controlframes;
349 drvs->jabber_events = port_stats->jabber_events;
350 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
351 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
352 drvs->forwarded_packets = rxf_stats->forwarded_packets;
353 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
354 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
355 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
356 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
357 }
358
359 static void populate_lancer_stats(struct be_adapter *adapter)
360 {
361
362 struct be_drv_stats *drvs = &adapter->drv_stats;
363 struct lancer_pport_stats *pport_stats =
364 pport_stats_from_cmd(adapter);
365
366 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
367 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
368 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
369 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
370 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
371 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
372 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376 drvs->rx_dropped_tcp_length =
377 pport_stats->rx_dropped_invalid_tcp_length;
378 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381 drvs->rx_dropped_header_too_small =
382 pport_stats->rx_dropped_header_too_small;
383 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384 drvs->rx_address_mismatch_drops =
385 pport_stats->rx_address_mismatch_drops +
386 pport_stats->rx_vlan_mismatch_drops;
387 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
388 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
390 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
391 drvs->jabber_events = pport_stats->rx_jabbers;
392 drvs->forwarded_packets = pport_stats->num_forwards_lo;
393 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
394 drvs->rx_drops_too_many_frags =
395 pport_stats->rx_drops_too_many_frags_lo;
396 }
397
398 static void accumulate_16bit_val(u32 *acc, u16 val)
399 {
400 #define lo(x) (x & 0xFFFF)
401 #define hi(x) (x & 0xFFFF0000)
402 bool wrapped = val < lo(*acc);
403 u32 newacc = hi(*acc) + val;
404
405 if (wrapped)
406 newacc += 65536;
407 ACCESS_ONCE(*acc) = newacc;
408 }
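/* Worked example of the wrap handling above (illustrative, not from the
 * original source): if *acc == 0x0001FFF0 (one prior wrap, last HW reading
 * 0xFFF0) and the new 16-bit reading is val == 0x0005, then val < lo(*acc),
 * so the counter is assumed to have wrapped once more:
 * newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005, i.e. the 32-bit
 * accumulator advances by 0x15, matching the HW counter's progress.
 */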
409
410 void be_parse_stats(struct be_adapter *adapter)
411 {
412 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
413 struct be_rx_obj *rxo;
414 int i;
415
416 if (adapter->generation == BE_GEN3) {
417 if (lancer_chip(adapter))
418 populate_lancer_stats(adapter);
419 else
420 populate_be3_stats(adapter);
421 } else {
422 populate_be2_stats(adapter);
423 }
424
425 if (lancer_chip(adapter))
426 goto done;
427
428 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
429 for_all_rx_queues(adapter, rxo, i) {
430 /* below erx HW counter can actually wrap around after
431 * 65535. Driver accumulates a 32-bit value
432 */
433 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
434 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
435 }
436 done:
437 return;
438 }
439
440 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
441 struct rtnl_link_stats64 *stats)
442 {
443 struct be_adapter *adapter = netdev_priv(netdev);
444 struct be_drv_stats *drvs = &adapter->drv_stats;
445 struct be_rx_obj *rxo;
446 struct be_tx_obj *txo;
447 u64 pkts, bytes;
448 unsigned int start;
449 int i;
450
451 for_all_rx_queues(adapter, rxo, i) {
452 const struct be_rx_stats *rx_stats = rx_stats(rxo);
453 do {
454 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
455 pkts = rx_stats(rxo)->rx_pkts;
456 bytes = rx_stats(rxo)->rx_bytes;
457 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
458 stats->rx_packets += pkts;
459 stats->rx_bytes += bytes;
460 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
461 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
462 rx_stats(rxo)->rx_drops_no_frags;
463 }
464
465 for_all_tx_queues(adapter, txo, i) {
466 const struct be_tx_stats *tx_stats = tx_stats(txo);
467 do {
468 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
469 pkts = tx_stats(txo)->tx_pkts;
470 bytes = tx_stats(txo)->tx_bytes;
471 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
472 stats->tx_packets += pkts;
473 stats->tx_bytes += bytes;
474 }
475
476 /* bad pkts received */
477 stats->rx_errors = drvs->rx_crc_errors +
478 drvs->rx_alignment_symbol_errors +
479 drvs->rx_in_range_errors +
480 drvs->rx_out_range_errors +
481 drvs->rx_frame_too_long +
482 drvs->rx_dropped_too_small +
483 drvs->rx_dropped_too_short +
484 drvs->rx_dropped_header_too_small +
485 drvs->rx_dropped_tcp_length +
486 drvs->rx_dropped_runt;
487
488 /* detailed rx errors */
489 stats->rx_length_errors = drvs->rx_in_range_errors +
490 drvs->rx_out_range_errors +
491 drvs->rx_frame_too_long;
492
493 stats->rx_crc_errors = drvs->rx_crc_errors;
494
495 /* frame alignment errors */
496 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
497
498 /* receiver fifo overrun */
499 /* drops_no_pbuf is not per i/f, it's per BE card */
500 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
501 drvs->rx_input_fifo_overflow_drop +
502 drvs->rx_drops_no_pbuf;
503 return stats;
504 }
505
506 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
507 {
508 struct net_device *netdev = adapter->netdev;
509
510 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
511 netif_carrier_off(netdev);
512 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
513 }
514
515 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
516 netif_carrier_on(netdev);
517 else
518 netif_carrier_off(netdev);
519 }
520
521 static void be_tx_stats_update(struct be_tx_obj *txo,
522 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
523 {
524 struct be_tx_stats *stats = tx_stats(txo);
525
526 u64_stats_update_begin(&stats->sync);
527 stats->tx_reqs++;
528 stats->tx_wrbs += wrb_cnt;
529 stats->tx_bytes += copied;
530 stats->tx_pkts += (gso_segs ? gso_segs : 1);
531 if (stopped)
532 stats->tx_stops++;
533 u64_stats_update_end(&stats->sync);
534 }
535
536 /* Determine number of WRB entries needed to xmit data in an skb */
537 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
538 bool *dummy)
539 {
540 int cnt = (skb->len > skb->data_len);
541
542 cnt += skb_shinfo(skb)->nr_frags;
543
544 /* to account for hdr wrb */
545 cnt++;
546 if (lancer_chip(adapter) || !(cnt & 1)) {
547 *dummy = false;
548 } else {
549 /* add a dummy to make it an even num */
550 cnt++;
551 *dummy = true;
552 }
553 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
554 return cnt;
555 }
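/* Example of the count above (illustrative only): an skb with linear data
 * and two page frags needs 1 (linear) + 2 (frags) + 1 (hdr) = 4 WRBs --
 * already even, so no dummy. With a single frag the count is 3; on
 * non-Lancer chips a dummy WRB is added to round it up to 4.
 */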
556
557 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
558 {
559 wrb->frag_pa_hi = upper_32_bits(addr);
560 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
561 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
562 wrb->rsvd0 = 0;
563 }
564
565 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
566 struct sk_buff *skb)
567 {
568 u8 vlan_prio;
569 u16 vlan_tag;
570
571 vlan_tag = vlan_tx_tag_get(skb);
572 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
573 /* If vlan priority provided by OS is NOT in available bmap */
574 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
575 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
576 adapter->recommended_prio;
577
578 return vlan_tag;
579 }
580
581 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
582 {
583 return vlan_tx_tag_present(skb) || adapter->pvid;
584 }
585
586 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
587 struct sk_buff *skb, u32 wrb_cnt, u32 len)
588 {
589 u16 vlan_tag;
590
591 memset(hdr, 0, sizeof(*hdr));
592
593 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
594
595 if (skb_is_gso(skb)) {
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
597 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
598 hdr, skb_shinfo(skb)->gso_size);
599 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
601 if (lancer_chip(adapter) && adapter->sli_family ==
602 LANCER_A0_SLI_FAMILY) {
603 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
604 if (is_tcp_pkt(skb))
605 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
606 tcpcs, hdr, 1);
607 else if (is_udp_pkt(skb))
608 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
609 udpcs, hdr, 1);
610 }
611 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
612 if (is_tcp_pkt(skb))
613 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
614 else if (is_udp_pkt(skb))
615 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
616 }
617
618 if (vlan_tx_tag_present(skb)) {
619 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
620 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
621 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
622 }
623
624 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
625 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
626 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
627 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
628 }
629
630 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
631 bool unmap_single)
632 {
633 dma_addr_t dma;
634
635 be_dws_le_to_cpu(wrb, sizeof(*wrb));
636
637 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
638 if (wrb->frag_len) {
639 if (unmap_single)
640 dma_unmap_single(dev, dma, wrb->frag_len,
641 DMA_TO_DEVICE);
642 else
643 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
644 }
645 }
646
647 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
648 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
649 {
650 dma_addr_t busaddr;
651 int i, copied = 0;
652 struct device *dev = &adapter->pdev->dev;
653 struct sk_buff *first_skb = skb;
654 struct be_eth_wrb *wrb;
655 struct be_eth_hdr_wrb *hdr;
656 bool map_single = false;
657 u16 map_head;
658
659 hdr = queue_head_node(txq);
660 queue_head_inc(txq);
661 map_head = txq->head;
662
663 if (skb->len > skb->data_len) {
664 int len = skb_headlen(skb);
665 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
666 if (dma_mapping_error(dev, busaddr))
667 goto dma_err;
668 map_single = true;
669 wrb = queue_head_node(txq);
670 wrb_fill(wrb, busaddr, len);
671 be_dws_cpu_to_le(wrb, sizeof(*wrb));
672 queue_head_inc(txq);
673 copied += len;
674 }
675
676 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
677 const struct skb_frag_struct *frag =
678 &skb_shinfo(skb)->frags[i];
679 busaddr = skb_frag_dma_map(dev, frag, 0,
680 skb_frag_size(frag), DMA_TO_DEVICE);
681 if (dma_mapping_error(dev, busaddr))
682 goto dma_err;
683 wrb = queue_head_node(txq);
684 wrb_fill(wrb, busaddr, skb_frag_size(frag));
685 be_dws_cpu_to_le(wrb, sizeof(*wrb));
686 queue_head_inc(txq);
687 copied += skb_frag_size(frag);
688 }
689
690 if (dummy_wrb) {
691 wrb = queue_head_node(txq);
692 wrb_fill(wrb, 0, 0);
693 be_dws_cpu_to_le(wrb, sizeof(*wrb));
694 queue_head_inc(txq);
695 }
696
697 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
698 be_dws_cpu_to_le(hdr, sizeof(*hdr));
699
700 return copied;
701 dma_err:
702 txq->head = map_head;
703 while (copied) {
704 wrb = queue_head_node(txq);
705 unmap_tx_frag(dev, wrb, map_single);
706 map_single = false;
707 copied -= wrb->frag_len;
708 queue_head_inc(txq);
709 }
710 return 0;
711 }
712
713 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
714 struct sk_buff *skb)
715 {
716 u16 vlan_tag = 0;
717
718 skb = skb_share_check(skb, GFP_ATOMIC);
719 if (unlikely(!skb))
720 return skb;
721
722 if (vlan_tx_tag_present(skb)) {
723 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
724 __vlan_put_tag(skb, vlan_tag);
725 skb->vlan_tci = 0;
726 }
727
728 return skb;
729 }
730
731 static netdev_tx_t be_xmit(struct sk_buff *skb,
732 struct net_device *netdev)
733 {
734 struct be_adapter *adapter = netdev_priv(netdev);
735 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
736 struct be_queue_info *txq = &txo->q;
737 struct iphdr *ip = NULL;
738 u32 wrb_cnt = 0, copied = 0;
739 u32 start = txq->head, eth_hdr_len;
740 bool dummy_wrb, stopped = false;
741
742 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
743 VLAN_ETH_HLEN : ETH_HLEN;
744
745 /* HW has a bug which considers padding bytes as legal
746 * and modifies the IPv4 hdr's 'tot_len' field
747 */
748 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
749 is_ipv4_pkt(skb)) {
750 ip = (struct iphdr *)ip_hdr(skb);
751 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
752 }
753
754 /* HW has a bug wherein it will calculate CSUM for VLAN
755 * pkts even though it is disabled.
756 * Manually insert VLAN in pkt.
757 */
758 if (skb->ip_summed != CHECKSUM_PARTIAL &&
759 be_vlan_tag_chk(adapter, skb)) {
760 skb = be_insert_vlan_in_pkt(adapter, skb);
761 if (unlikely(!skb))
762 goto tx_drop;
763 }
764
765 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
766
767 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
768 if (copied) {
769 int gso_segs = skb_shinfo(skb)->gso_segs;
770
771 /* record the sent skb in the sent_skb table */
772 BUG_ON(txo->sent_skb_list[start]);
773 txo->sent_skb_list[start] = skb;
774
775 /* Ensure txq has space for the next skb; else stop the queue
776 * *BEFORE* ringing the tx doorbell, so that we serialize the
777 * tx compls of the current transmit, which will wake up the queue
778 */
779 atomic_add(wrb_cnt, &txq->used);
780 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
781 txq->len) {
782 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
783 stopped = true;
784 }
785
786 be_txq_notify(adapter, txq->id, wrb_cnt);
787
788 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
789 } else {
790 txq->head = start;
791 dev_kfree_skb_any(skb);
792 }
793 tx_drop:
794 return NETDEV_TX_OK;
795 }
796
797 static int be_change_mtu(struct net_device *netdev, int new_mtu)
798 {
799 struct be_adapter *adapter = netdev_priv(netdev);
800 if (new_mtu < BE_MIN_MTU ||
801 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802 (ETH_HLEN + ETH_FCS_LEN))) {
803 dev_info(&adapter->pdev->dev,
804 "MTU must be between %d and %d bytes\n",
805 BE_MIN_MTU,
806 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
807 return -EINVAL;
808 }
809 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810 netdev->mtu, new_mtu);
811 netdev->mtu = new_mtu;
812 return 0;
813 }
814
815 /*
816 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
817 * If the user configures more, place BE in vlan promiscuous mode.
818 */
819 static int be_vid_config(struct be_adapter *adapter)
820 {
821 u16 vids[BE_NUM_VLANS_SUPPORTED];
822 u16 num = 0, i;
823 int status = 0;
824
825 /* No need to further configure vids if in promiscuous mode */
826 if (adapter->promiscuous)
827 return 0;
828
829 if (adapter->vlans_added > adapter->max_vlans)
830 goto set_vlan_promisc;
831
832 /* Construct VLAN Table to give to HW */
833 for (i = 0; i < VLAN_N_VID; i++)
834 if (adapter->vlan_tag[i])
835 vids[num++] = cpu_to_le16(i);
836
837 status = be_cmd_vlan_config(adapter, adapter->if_handle,
838 vids, num, 1, 0);
839
840 /* Set to VLAN promisc mode as setting VLAN filter failed */
841 if (status) {
842 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
843 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
844 goto set_vlan_promisc;
845 }
846
847 return status;
848
849 set_vlan_promisc:
850 status = be_cmd_vlan_config(adapter, adapter->if_handle,
851 NULL, 0, 1, 1);
852 return status;
853 }
854
855 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
856 {
857 struct be_adapter *adapter = netdev_priv(netdev);
858 int status = 0;
859
860 if (!be_physfn(adapter)) {
861 status = -EINVAL;
862 goto ret;
863 }
864
865 adapter->vlan_tag[vid] = 1;
866 if (adapter->vlans_added <= (adapter->max_vlans + 1))
867 status = be_vid_config(adapter);
868
869 if (!status)
870 adapter->vlans_added++;
871 else
872 adapter->vlan_tag[vid] = 0;
873 ret:
874 return status;
875 }
876
877 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
878 {
879 struct be_adapter *adapter = netdev_priv(netdev);
880 int status = 0;
881
882 if (!be_physfn(adapter)) {
883 status = -EINVAL;
884 goto ret;
885 }
886
887 adapter->vlan_tag[vid] = 0;
888 if (adapter->vlans_added <= adapter->max_vlans)
889 status = be_vid_config(adapter);
890
891 if (!status)
892 adapter->vlans_added--;
893 else
894 adapter->vlan_tag[vid] = 1;
895 ret:
896 return status;
897 }
898
899 static void be_set_rx_mode(struct net_device *netdev)
900 {
901 struct be_adapter *adapter = netdev_priv(netdev);
902 int status;
903
904 if (netdev->flags & IFF_PROMISC) {
905 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
906 adapter->promiscuous = true;
907 goto done;
908 }
909
910 /* BE was previously in promiscuous mode; disable it */
911 if (adapter->promiscuous) {
912 adapter->promiscuous = false;
913 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
914
915 if (adapter->vlans_added)
916 be_vid_config(adapter);
917 }
918
919 /* Enable multicast promisc if num configured exceeds what we support */
920 if (netdev->flags & IFF_ALLMULTI ||
921 netdev_mc_count(netdev) > BE_MAX_MC) {
922 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
923 goto done;
924 }
925
926 if (netdev_uc_count(netdev) != adapter->uc_macs) {
927 struct netdev_hw_addr *ha;
928 int i = 1; /* First slot is claimed by the Primary MAC */
929
930 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
931 be_cmd_pmac_del(adapter, adapter->if_handle,
932 adapter->pmac_id[i], 0);
933 }
934
935 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
936 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
937 adapter->promiscuous = true;
938 goto done;
939 }
940
941 netdev_for_each_uc_addr(ha, adapter->netdev) {
942 adapter->uc_macs++; /* First slot is for Primary MAC */
943 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
944 adapter->if_handle,
945 &adapter->pmac_id[adapter->uc_macs], 0);
946 }
947 }
948
949 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
950
951 /* Set to MCAST promisc mode if setting MULTICAST address fails */
952 if (status) {
953 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
954 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
955 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
956 }
957 done:
958 return;
959 }
960
961 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962 {
963 struct be_adapter *adapter = netdev_priv(netdev);
964 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
965 int status;
966
967 if (!sriov_enabled(adapter))
968 return -EPERM;
969
970 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
971 return -EINVAL;
972
973 if (lancer_chip(adapter)) {
974 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
975 } else {
976 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
977 vf_cfg->pmac_id, vf + 1);
978
979 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
980 &vf_cfg->pmac_id, vf + 1);
981 }
982
983 if (status)
984 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
985 mac, vf);
986 else
987 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
988
989 return status;
990 }
991
992 static int be_get_vf_config(struct net_device *netdev, int vf,
993 struct ifla_vf_info *vi)
994 {
995 struct be_adapter *adapter = netdev_priv(netdev);
996 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
997
998 if (!sriov_enabled(adapter))
999 return -EPERM;
1000
1001 if (vf >= adapter->num_vfs)
1002 return -EINVAL;
1003
1004 vi->vf = vf;
1005 vi->tx_rate = vf_cfg->tx_rate;
1006 vi->vlan = vf_cfg->vlan_tag;
1007 vi->qos = 0;
1008 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1009
1010 return 0;
1011 }
1012
1013 static int be_set_vf_vlan(struct net_device *netdev,
1014 int vf, u16 vlan, u8 qos)
1015 {
1016 struct be_adapter *adapter = netdev_priv(netdev);
1017 int status = 0;
1018
1019 if (!sriov_enabled(adapter))
1020 return -EPERM;
1021
1022 if (vf >= adapter->num_vfs || vlan > 4095)
1023 return -EINVAL;
1024
1025 if (vlan) {
1026 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1027 /* If this is a new value, program it; else skip. */
1028 adapter->vf_cfg[vf].vlan_tag = vlan;
1029
1030 status = be_cmd_set_hsw_config(adapter, vlan,
1031 vf + 1, adapter->vf_cfg[vf].if_handle);
1032 }
1033 } else {
1034 /* Reset Transparent Vlan Tagging. */
1035 adapter->vf_cfg[vf].vlan_tag = 0;
1036 vlan = adapter->vf_cfg[vf].def_vid;
1037 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1038 adapter->vf_cfg[vf].if_handle);
1039 }
1040
1041
1042 if (status)
1043 dev_info(&adapter->pdev->dev,
1044 "VLAN %d config on VF %d failed\n", vlan, vf);
1045 return status;
1046 }
1047
1048 static int be_set_vf_tx_rate(struct net_device *netdev,
1049 int vf, int rate)
1050 {
1051 struct be_adapter *adapter = netdev_priv(netdev);
1052 int status = 0;
1053
1054 if (!sriov_enabled(adapter))
1055 return -EPERM;
1056
1057 if (vf >= adapter->num_vfs)
1058 return -EINVAL;
1059
1060 if (rate < 100 || rate > 10000) {
1061 dev_err(&adapter->pdev->dev,
1062 "tx rate must be between 100 and 10000 Mbps\n");
1063 return -EINVAL;
1064 }
1065
1066 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1067
1068 if (status)
1069 dev_err(&adapter->pdev->dev,
1070 "tx rate %d on VF %d failed\n", rate, vf);
1071 else
1072 adapter->vf_cfg[vf].tx_rate = rate;
1073 return status;
1074 }
1075
1076 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1077 {
1078 struct pci_dev *dev, *pdev = adapter->pdev;
1079 int vfs = 0, assigned_vfs = 0, pos;
1080 u16 offset, stride;
1081
1082 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1083 if (!pos)
1084 return 0;
1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1086 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1087
1088 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1089 while (dev) {
1090 if (dev->is_virtfn && dev->physfn == pdev) {
1091 vfs++;
1092 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1093 assigned_vfs++;
1094 }
1095 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1096 }
1097 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1098 }
1099
1100 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1101 {
1102 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1103 ulong now = jiffies;
1104 ulong delta = now - stats->rx_jiffies;
1105 u64 pkts;
1106 unsigned int start, eqd;
1107
1108 if (!eqo->enable_aic) {
1109 eqd = eqo->eqd;
1110 goto modify_eqd;
1111 }
1112
1113 if (eqo->idx >= adapter->num_rx_qs)
1114 return;
1115
1116 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1117
1118 /* Wrapped around */
1119 if (time_before(now, stats->rx_jiffies)) {
1120 stats->rx_jiffies = now;
1121 return;
1122 }
1123
1124 /* Update once a second */
1125 if (delta < HZ)
1126 return;
1127
1128 do {
1129 start = u64_stats_fetch_begin_bh(&stats->sync);
1130 pkts = stats->rx_pkts;
1131 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1132
1133 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1134 stats->rx_pkts_prev = pkts;
1135 stats->rx_jiffies = now;
1136 eqd = (stats->rx_pps / 110000) << 3;
1137 eqd = min(eqd, eqo->max_eqd);
1138 eqd = max(eqd, eqo->min_eqd);
1139 if (eqd < 10)
1140 eqd = 0;
1141
1142 modify_eqd:
1143 if (eqd != eqo->cur_eqd) {
1144 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1145 eqo->cur_eqd = eqd;
1146 }
1147 }
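/* Note on the AIC arithmetic above (descriptive, not normative): rx_pps is
 * packets/sec over the last interval; eqd = (rx_pps / 110000) << 3, e.g.
 * ~220,000 pps yields eqd = 16. The result is then clamped to
 * [min_eqd, max_eqd] and forced to 0 below 10, so low traffic rates are
 * not penalised with extra interrupt delay.
 */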
1148
1149 static void be_rx_stats_update(struct be_rx_obj *rxo,
1150 struct be_rx_compl_info *rxcp)
1151 {
1152 struct be_rx_stats *stats = rx_stats(rxo);
1153
1154 u64_stats_update_begin(&stats->sync);
1155 stats->rx_compl++;
1156 stats->rx_bytes += rxcp->pkt_size;
1157 stats->rx_pkts++;
1158 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1159 stats->rx_mcast_pkts++;
1160 if (rxcp->err)
1161 stats->rx_compl_err++;
1162 u64_stats_update_end(&stats->sync);
1163 }
1164
1165 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1166 {
1167 /* L4 checksum is not reliable for non TCP/UDP packets.
1168 * Also ignore ipcksm for ipv6 pkts */
1169 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1170 (rxcp->ip_csum || rxcp->ipv6);
1171 }
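/* In other words: trust the HW checksum only for TCP/UDP frames whose L4
 * checksum passed, and for IPv4 additionally require the IP header checksum
 * to have passed (IPv6 has no header checksum, so ip_csum is ignored there).
 */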
1172
1173 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1174 u16 frag_idx)
1175 {
1176 struct be_adapter *adapter = rxo->adapter;
1177 struct be_rx_page_info *rx_page_info;
1178 struct be_queue_info *rxq = &rxo->q;
1179
1180 rx_page_info = &rxo->page_info_tbl[frag_idx];
1181 BUG_ON(!rx_page_info->page);
1182
1183 if (rx_page_info->last_page_user) {
1184 dma_unmap_page(&adapter->pdev->dev,
1185 dma_unmap_addr(rx_page_info, bus),
1186 adapter->big_page_size, DMA_FROM_DEVICE);
1187 rx_page_info->last_page_user = false;
1188 }
1189
1190 atomic_dec(&rxq->used);
1191 return rx_page_info;
1192 }
1193
1194 /* Throw away the data in the Rx completion */
1195 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1196 struct be_rx_compl_info *rxcp)
1197 {
1198 struct be_queue_info *rxq = &rxo->q;
1199 struct be_rx_page_info *page_info;
1200 u16 i, num_rcvd = rxcp->num_rcvd;
1201
1202 for (i = 0; i < num_rcvd; i++) {
1203 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1204 put_page(page_info->page);
1205 memset(page_info, 0, sizeof(*page_info));
1206 index_inc(&rxcp->rxq_idx, rxq->len);
1207 }
1208 }
1209
1210 /*
1211 * skb_fill_rx_data forms a complete skb for an ether frame
1212 * indicated by rxcp.
1213 */
1214 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1215 struct be_rx_compl_info *rxcp)
1216 {
1217 struct be_queue_info *rxq = &rxo->q;
1218 struct be_rx_page_info *page_info;
1219 u16 i, j;
1220 u16 hdr_len, curr_frag_len, remaining;
1221 u8 *start;
1222
1223 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1224 start = page_address(page_info->page) + page_info->page_offset;
1225 prefetch(start);
1226
1227 /* Copy data in the first descriptor of this completion */
1228 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1229
1230 skb->len = curr_frag_len;
1231 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1232 memcpy(skb->data, start, curr_frag_len);
1233 /* Complete packet has now been moved to data */
1234 put_page(page_info->page);
1235 skb->data_len = 0;
1236 skb->tail += curr_frag_len;
1237 } else {
1238 hdr_len = ETH_HLEN;
1239 memcpy(skb->data, start, hdr_len);
1240 skb_shinfo(skb)->nr_frags = 1;
1241 skb_frag_set_page(skb, 0, page_info->page);
1242 skb_shinfo(skb)->frags[0].page_offset =
1243 page_info->page_offset + hdr_len;
1244 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1245 skb->data_len = curr_frag_len - hdr_len;
1246 skb->truesize += rx_frag_size;
1247 skb->tail += hdr_len;
1248 }
1249 page_info->page = NULL;
1250
1251 if (rxcp->pkt_size <= rx_frag_size) {
1252 BUG_ON(rxcp->num_rcvd != 1);
1253 return;
1254 }
1255
1256 /* More frags present for this completion */
1257 index_inc(&rxcp->rxq_idx, rxq->len);
1258 remaining = rxcp->pkt_size - curr_frag_len;
1259 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1260 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1261 curr_frag_len = min(remaining, rx_frag_size);
1262
1263 /* Coalesce all frags from the same physical page in one slot */
1264 if (page_info->page_offset == 0) {
1265 /* Fresh page */
1266 j++;
1267 skb_frag_set_page(skb, j, page_info->page);
1268 skb_shinfo(skb)->frags[j].page_offset =
1269 page_info->page_offset;
1270 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1271 skb_shinfo(skb)->nr_frags++;
1272 } else {
1273 put_page(page_info->page);
1274 }
1275
1276 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1277 skb->len += curr_frag_len;
1278 skb->data_len += curr_frag_len;
1279 skb->truesize += rx_frag_size;
1280 remaining -= curr_frag_len;
1281 index_inc(&rxcp->rxq_idx, rxq->len);
1282 page_info->page = NULL;
1283 }
1284 BUG_ON(j > MAX_SKB_FRAGS);
1285 }
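/* Summary of the fill logic above: tiny frames (<= BE_HDR_LEN) are copied
 * entirely into the skb's linear area and the page is released; larger
 * frames get only the Ethernet header copied, with the payload attached as
 * page fragments, coalescing consecutive frags that share a physical page
 * into a single skb frag slot.
 */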
1286
1287 /* Process the RX completion indicated by rxcp when GRO is disabled */
1288 static void be_rx_compl_process(struct be_rx_obj *rxo,
1289 struct be_rx_compl_info *rxcp)
1290 {
1291 struct be_adapter *adapter = rxo->adapter;
1292 struct net_device *netdev = adapter->netdev;
1293 struct sk_buff *skb;
1294
1295 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1296 if (unlikely(!skb)) {
1297 rx_stats(rxo)->rx_drops_no_skbs++;
1298 be_rx_compl_discard(rxo, rxcp);
1299 return;
1300 }
1301
1302 skb_fill_rx_data(rxo, skb, rxcp);
1303
1304 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1305 skb->ip_summed = CHECKSUM_UNNECESSARY;
1306 else
1307 skb_checksum_none_assert(skb);
1308
1309 skb->protocol = eth_type_trans(skb, netdev);
1310 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1311 if (netdev->features & NETIF_F_RXHASH)
1312 skb->rxhash = rxcp->rss_hash;
1313
1314
1315 if (rxcp->vlanf)
1316 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1317
1318 netif_receive_skb(skb);
1319 }
1320
1321 /* Process the RX completion indicated by rxcp when GRO is enabled */
1322 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1323 struct be_rx_compl_info *rxcp)
1324 {
1325 struct be_adapter *adapter = rxo->adapter;
1326 struct be_rx_page_info *page_info;
1327 struct sk_buff *skb = NULL;
1328 struct be_queue_info *rxq = &rxo->q;
1329 u16 remaining, curr_frag_len;
1330 u16 i, j;
1331
1332 skb = napi_get_frags(napi);
1333 if (!skb) {
1334 be_rx_compl_discard(rxo, rxcp);
1335 return;
1336 }
1337
1338 remaining = rxcp->pkt_size;
1339 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1340 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1341
1342 curr_frag_len = min(remaining, rx_frag_size);
1343
1344 /* Coalesce all frags from the same physical page in one slot */
1345 if (i == 0 || page_info->page_offset == 0) {
1346 /* First frag or Fresh page */
1347 j++;
1348 skb_frag_set_page(skb, j, page_info->page);
1349 skb_shinfo(skb)->frags[j].page_offset =
1350 page_info->page_offset;
1351 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1352 } else {
1353 put_page(page_info->page);
1354 }
1355 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1356 skb->truesize += rx_frag_size;
1357 remaining -= curr_frag_len;
1358 index_inc(&rxcp->rxq_idx, rxq->len);
1359 memset(page_info, 0, sizeof(*page_info));
1360 }
1361 BUG_ON(j > MAX_SKB_FRAGS);
1362
1363 skb_shinfo(skb)->nr_frags = j + 1;
1364 skb->len = rxcp->pkt_size;
1365 skb->data_len = rxcp->pkt_size;
1366 skb->ip_summed = CHECKSUM_UNNECESSARY;
1367 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1368 if (adapter->netdev->features & NETIF_F_RXHASH)
1369 skb->rxhash = rxcp->rss_hash;
1370
1371 if (rxcp->vlanf)
1372 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1373
1374 napi_gro_frags(napi);
1375 }
1376
1377 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1378 struct be_rx_compl_info *rxcp)
1379 {
1380 rxcp->pkt_size =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1382 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1383 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1384 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1385 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1386 rxcp->ip_csum =
1387 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1388 rxcp->l4_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1390 rxcp->ipv6 =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1392 rxcp->rxq_idx =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1394 rxcp->num_rcvd =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1396 rxcp->pkt_type =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1398 rxcp->rss_hash =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1400 if (rxcp->vlanf) {
1401 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1402 compl);
1403 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1404 compl);
1405 }
1406 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1407 }
1408
1409 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1410 struct be_rx_compl_info *rxcp)
1411 {
1412 rxcp->pkt_size =
1413 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1414 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1415 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1416 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1417 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1418 rxcp->ip_csum =
1419 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1420 rxcp->l4_csum =
1421 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1422 rxcp->ipv6 =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1424 rxcp->rxq_idx =
1425 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1426 rxcp->num_rcvd =
1427 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1428 rxcp->pkt_type =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1430 rxcp->rss_hash =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1432 if (rxcp->vlanf) {
1433 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1434 compl);
1435 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1436 compl);
1437 }
1438 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1439 }
1440
1441 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1442 {
1443 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1444 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1445 struct be_adapter *adapter = rxo->adapter;
1446
1447 /* For checking the valid bit it is Ok to use either definition as the
1448 * valid bit is at the same position in both v0 and v1 Rx compl */
1449 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1450 return NULL;
1451
1452 rmb();
1453 be_dws_le_to_cpu(compl, sizeof(*compl));
1454
1455 if (adapter->be3_native)
1456 be_parse_rx_compl_v1(compl, rxcp);
1457 else
1458 be_parse_rx_compl_v0(compl, rxcp);
1459
1460 if (rxcp->vlanf) {
1461 /* vlanf could be wrongly set in some cards.
1462 * ignore if vtm is not set */
1463 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1464 rxcp->vlanf = 0;
1465
1466 if (!lancer_chip(adapter))
1467 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1468
1469 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1470 !adapter->vlan_tag[rxcp->vlan_tag])
1471 rxcp->vlanf = 0;
1472 }
1473
1474 /* As the compl has been parsed, reset it; we won't touch it again */
1475 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1476
1477 queue_tail_inc(&rxo->cq);
1478 return rxcp;
1479 }
1480
1481 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1482 {
1483 u32 order = get_order(size);
1484
1485 if (order > 0)
1486 gfp |= __GFP_COMP;
1487 return alloc_pages(gfp, order);
1488 }
1489
1490 /*
1491 * Allocate a page, split it to fragments of size rx_frag_size and post as
1492 * receive buffers to BE
1493 */
1494 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1495 {
1496 struct be_adapter *adapter = rxo->adapter;
1497 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1498 struct be_queue_info *rxq = &rxo->q;
1499 struct page *pagep = NULL;
1500 struct be_eth_rx_d *rxd;
1501 u64 page_dmaaddr = 0, frag_dmaaddr;
1502 u32 posted, page_offset = 0;
1503
1504 page_info = &rxo->page_info_tbl[rxq->head];
1505 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1506 if (!pagep) {
1507 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1508 if (unlikely(!pagep)) {
1509 rx_stats(rxo)->rx_post_fail++;
1510 break;
1511 }
1512 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1513 0, adapter->big_page_size,
1514 DMA_FROM_DEVICE);
1515 page_info->page_offset = 0;
1516 } else {
1517 get_page(pagep);
1518 page_info->page_offset = page_offset + rx_frag_size;
1519 }
1520 page_offset = page_info->page_offset;
1521 page_info->page = pagep;
1522 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1523 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1524
1525 rxd = queue_head_node(rxq);
1526 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1527 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1528
1529 /* Any space left in the current big page for another frag? */
1530 if ((page_offset + rx_frag_size + rx_frag_size) >
1531 adapter->big_page_size) {
1532 pagep = NULL;
1533 page_info->last_page_user = true;
1534 }
1535
1536 prev_page_info = page_info;
1537 queue_head_inc(rxq);
1538 page_info = &rxo->page_info_tbl[rxq->head];
1539 }
1540 if (pagep)
1541 prev_page_info->last_page_user = true;
1542
1543 if (posted) {
1544 atomic_add(posted, &rxq->used);
1545 be_rxq_notify(adapter, rxq->id, posted);
1546 } else if (atomic_read(&rxq->used) == 0) {
1547 /* Let be_worker replenish when memory is available */
1548 rxo->rx_post_starved = true;
1549 }
1550 }
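/* Sizing note (assuming the default rx_frag_size of 2048 and 4K pages):
 * big_page_size works out to PAGE_SIZE, so each allocated page is carved
 * into two 2K receive fragments; the fragment that takes the last slice of
 * a page is marked last_page_user so the DMA mapping is torn down only when
 * that final fragment is consumed.
 */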
1551
1552 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1553 {
1554 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1555
1556 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1557 return NULL;
1558
1559 rmb();
1560 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1561
1562 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1563
1564 queue_tail_inc(tx_cq);
1565 return txcp;
1566 }
1567
1568 static u16 be_tx_compl_process(struct be_adapter *adapter,
1569 struct be_tx_obj *txo, u16 last_index)
1570 {
1571 struct be_queue_info *txq = &txo->q;
1572 struct be_eth_wrb *wrb;
1573 struct sk_buff **sent_skbs = txo->sent_skb_list;
1574 struct sk_buff *sent_skb;
1575 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1576 bool unmap_skb_hdr = true;
1577
1578 sent_skb = sent_skbs[txq->tail];
1579 BUG_ON(!sent_skb);
1580 sent_skbs[txq->tail] = NULL;
1581
1582 /* skip header wrb */
1583 queue_tail_inc(txq);
1584
1585 do {
1586 cur_index = txq->tail;
1587 wrb = queue_tail_node(txq);
1588 unmap_tx_frag(&adapter->pdev->dev, wrb,
1589 (unmap_skb_hdr && skb_headlen(sent_skb)));
1590 unmap_skb_hdr = false;
1591
1592 num_wrbs++;
1593 queue_tail_inc(txq);
1594 } while (cur_index != last_index);
1595
1596 kfree_skb(sent_skb);
1597 return num_wrbs;
1598 }
1599
1600 /* Return the number of events in the event queue */
1601 static inline int events_get(struct be_eq_obj *eqo)
1602 {
1603 struct be_eq_entry *eqe;
1604 int num = 0;
1605
1606 do {
1607 eqe = queue_tail_node(&eqo->q);
1608 if (eqe->evt == 0)
1609 break;
1610
1611 rmb();
1612 eqe->evt = 0;
1613 num++;
1614 queue_tail_inc(&eqo->q);
1615 } while (true);
1616
1617 return num;
1618 }
1619
1620 static int event_handle(struct be_eq_obj *eqo)
1621 {
1622 bool rearm = false;
1623 int num = events_get(eqo);
1624
1625 /* Deal with any spurious interrupts that come without events */
1626 if (!num)
1627 rearm = true;
1628
1629 if (num || msix_enabled(eqo->adapter))
1630 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1631
1632 if (num)
1633 napi_schedule(&eqo->napi);
1634
1635 return num;
1636 }
1637
1638 /* Leaves the EQ in a disarmed state */
1639 static void be_eq_clean(struct be_eq_obj *eqo)
1640 {
1641 int num = events_get(eqo);
1642
1643 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1644 }
1645
1646 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1647 {
1648 struct be_rx_page_info *page_info;
1649 struct be_queue_info *rxq = &rxo->q;
1650 struct be_queue_info *rx_cq = &rxo->cq;
1651 struct be_rx_compl_info *rxcp;
1652 u16 tail;
1653
1654 /* First clean up pending rx completions */
1655 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1656 be_rx_compl_discard(rxo, rxcp);
1657 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1658 }
1659
1660 /* Then free posted rx buffers that were not used */
1661 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1662 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1663 page_info = get_rx_page_info(rxo, tail);
1664 put_page(page_info->page);
1665 memset(page_info, 0, sizeof(*page_info));
1666 }
1667 BUG_ON(atomic_read(&rxq->used));
1668 rxq->tail = rxq->head = 0;
1669 }
1670
1671 static void be_tx_compl_clean(struct be_adapter *adapter)
1672 {
1673 struct be_tx_obj *txo;
1674 struct be_queue_info *txq;
1675 struct be_eth_tx_compl *txcp;
1676 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1677 struct sk_buff *sent_skb;
1678 bool dummy_wrb;
1679 int i, pending_txqs;
1680
1681 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1682 do {
1683 pending_txqs = adapter->num_tx_qs;
1684
1685 for_all_tx_queues(adapter, txo, i) {
1686 txq = &txo->q;
1687 while ((txcp = be_tx_compl_get(&txo->cq))) {
1688 end_idx =
1689 AMAP_GET_BITS(struct amap_eth_tx_compl,
1690 wrb_index, txcp);
1691 num_wrbs += be_tx_compl_process(adapter, txo,
1692 end_idx);
1693 cmpl++;
1694 }
1695 if (cmpl) {
1696 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1697 atomic_sub(num_wrbs, &txq->used);
1698 cmpl = 0;
1699 num_wrbs = 0;
1700 }
1701 if (atomic_read(&txq->used) == 0)
1702 pending_txqs--;
1703 }
1704
1705 if (pending_txqs == 0 || ++timeo > 200)
1706 break;
1707
1708 mdelay(1);
1709 } while (true);
1710
1711 for_all_tx_queues(adapter, txo, i) {
1712 txq = &txo->q;
1713 if (atomic_read(&txq->used))
1714 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1715 atomic_read(&txq->used));
1716
1717 /* free posted tx for which compls will never arrive */
1718 while (atomic_read(&txq->used)) {
1719 sent_skb = txo->sent_skb_list[txq->tail];
1720 end_idx = txq->tail;
1721 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1722 &dummy_wrb);
1723 index_adv(&end_idx, num_wrbs - 1, txq->len);
1724 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1725 atomic_sub(num_wrbs, &txq->used);
1726 }
1727 }
1728 }
1729
1730 static void be_evt_queues_destroy(struct be_adapter *adapter)
1731 {
1732 struct be_eq_obj *eqo;
1733 int i;
1734
1735 for_all_evt_queues(adapter, eqo, i) {
1736 if (eqo->q.created) {
1737 be_eq_clean(eqo);
1738 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1739 }
1740 be_queue_free(adapter, &eqo->q);
1741 }
1742 }
1743
1744 static int be_evt_queues_create(struct be_adapter *adapter)
1745 {
1746 struct be_queue_info *eq;
1747 struct be_eq_obj *eqo;
1748 int i, rc;
1749
1750 adapter->num_evt_qs = num_irqs(adapter);
1751
1752 for_all_evt_queues(adapter, eqo, i) {
1753 eqo->adapter = adapter;
1754 eqo->tx_budget = BE_TX_BUDGET;
1755 eqo->idx = i;
1756 eqo->max_eqd = BE_MAX_EQD;
1757 eqo->enable_aic = true;
1758
1759 eq = &eqo->q;
1760 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1761 sizeof(struct be_eq_entry));
1762 if (rc)
1763 return rc;
1764
1765 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1766 if (rc)
1767 return rc;
1768 }
1769 return 0;
1770 }
1771
1772 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1773 {
1774 struct be_queue_info *q;
1775
1776 q = &adapter->mcc_obj.q;
1777 if (q->created)
1778 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1779 be_queue_free(adapter, q);
1780
1781 q = &adapter->mcc_obj.cq;
1782 if (q->created)
1783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1784 be_queue_free(adapter, q);
1785 }
1786
1787 /* Must be called only after TX qs are created as MCC shares TX EQ */
1788 static int be_mcc_queues_create(struct be_adapter *adapter)
1789 {
1790 struct be_queue_info *q, *cq;
1791
1792 cq = &adapter->mcc_obj.cq;
1793 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1794 sizeof(struct be_mcc_compl)))
1795 goto err;
1796
1797 /* Use the default EQ for MCC completions */
1798 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1799 goto mcc_cq_free;
1800
1801 q = &adapter->mcc_obj.q;
1802 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1803 goto mcc_cq_destroy;
1804
1805 if (be_cmd_mccq_create(adapter, q, cq))
1806 goto mcc_q_free;
1807
1808 return 0;
1809
1810 mcc_q_free:
1811 be_queue_free(adapter, q);
1812 mcc_cq_destroy:
1813 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1814 mcc_cq_free:
1815 be_queue_free(adapter, cq);
1816 err:
1817 return -1;
1818 }
1819
1820 static void be_tx_queues_destroy(struct be_adapter *adapter)
1821 {
1822 struct be_queue_info *q;
1823 struct be_tx_obj *txo;
1824 u8 i;
1825
1826 for_all_tx_queues(adapter, txo, i) {
1827 q = &txo->q;
1828 if (q->created)
1829 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1830 be_queue_free(adapter, q);
1831
1832 q = &txo->cq;
1833 if (q->created)
1834 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1835 be_queue_free(adapter, q);
1836 }
1837 }
1838
1839 static int be_num_txqs_want(struct be_adapter *adapter)
1840 {
1841 if (sriov_want(adapter) || be_is_mc(adapter) ||
1842 lancer_chip(adapter) || !be_physfn(adapter) ||
1843 adapter->generation == BE_GEN2)
1844 return 1;
1845 else
1846 return MAX_TX_QS;
1847 }
1848
1849 static int be_tx_cqs_create(struct be_adapter *adapter)
1850 {
1851 struct be_queue_info *cq, *eq;
1852 int status;
1853 struct be_tx_obj *txo;
1854 u8 i;
1855
1856 adapter->num_tx_qs = be_num_txqs_want(adapter);
1857 if (adapter->num_tx_qs != MAX_TX_QS) {
1858 rtnl_lock();
1859 netif_set_real_num_tx_queues(adapter->netdev,
1860 adapter->num_tx_qs);
1861 rtnl_unlock();
1862 }
1863
1864 for_all_tx_queues(adapter, txo, i) {
1865 cq = &txo->cq;
1866 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1867 sizeof(struct be_eth_tx_compl));
1868 if (status)
1869 return status;
1870
1871 /* If num_evt_qs is less than num_tx_qs, then more than
1872 * one TXQ shares an EQ
1873 */
1874 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1875 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1876 if (status)
1877 return status;
1878 }
1879 return 0;
1880 }
1881
1882 static int be_tx_qs_create(struct be_adapter *adapter)
1883 {
1884 struct be_tx_obj *txo;
1885 int i, status;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889 sizeof(struct be_eth_wrb));
1890 if (status)
1891 return status;
1892
1893 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894 if (status)
1895 return status;
1896 }
1897
1898 return 0;
1899 }
1900
1901 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1902 {
1903 struct be_queue_info *q;
1904 struct be_rx_obj *rxo;
1905 int i;
1906
1907 for_all_rx_queues(adapter, rxo, i) {
1908 q = &rxo->cq;
1909 if (q->created)
1910 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1911 be_queue_free(adapter, q);
1912 }
1913 }
1914
1915 static int be_rx_cqs_create(struct be_adapter *adapter)
1916 {
1917 struct be_queue_info *eq, *cq;
1918 struct be_rx_obj *rxo;
1919 int rc, i;
1920
1921 /* We'll create as many RSS rings as there are irqs.
1922 * But when there's only one irq there's no use creating RSS rings
1923 */
1924 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1925 num_irqs(adapter) + 1 : 1;
1926 if (adapter->num_rx_qs != MAX_RX_QS) {
1927 rtnl_lock();
1928 netif_set_real_num_rx_queues(adapter->netdev,
1929 adapter->num_rx_qs);
1930 rtnl_unlock();
1931 }
1932
1933 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1934 for_all_rx_queues(adapter, rxo, i) {
1935 rxo->adapter = adapter;
1936 cq = &rxo->cq;
1937 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1938 sizeof(struct be_eth_rx_compl));
1939 if (rc)
1940 return rc;
1941
1942 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1943 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1944 if (rc)
1945 return rc;
1946 }
1947
1948 if (adapter->num_rx_qs != MAX_RX_QS)
1949 dev_info(&adapter->pdev->dev,
1950 "Created only %d receive queues\n", adapter->num_rx_qs);
1951
1952 return 0;
1953 }
1954
1955 static irqreturn_t be_intx(int irq, void *dev)
1956 {
1957 struct be_adapter *adapter = dev;
1958 int num_evts;
1959
1960 /* With INTx only one EQ is used */
1961 num_evts = event_handle(&adapter->eq_obj[0]);
1962 if (num_evts)
1963 return IRQ_HANDLED;
1964 else
1965 return IRQ_NONE;
1966 }
1967
1968 static irqreturn_t be_msix(int irq, void *dev)
1969 {
1970 struct be_eq_obj *eqo = dev;
1971
1972 event_handle(eqo);
1973 return IRQ_HANDLED;
1974 }
1975
1976 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1977 {
1978 return (rxcp->tcpf && !rxcp->err) ? true : false;
1979 }
1980
1981 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1982 int budget)
1983 {
1984 struct be_adapter *adapter = rxo->adapter;
1985 struct be_queue_info *rx_cq = &rxo->cq;
1986 struct be_rx_compl_info *rxcp;
1987 u32 work_done;
1988
1989 for (work_done = 0; work_done < budget; work_done++) {
1990 rxcp = be_rx_compl_get(rxo);
1991 if (!rxcp)
1992 break;
1993
1994 /* Is it a flush compl that has no data */
1995 if (unlikely(rxcp->num_rcvd == 0))
1996 goto loop_continue;
1997
1998 /* Discard compl with partial DMA Lancer B0 */
1999 if (unlikely(!rxcp->pkt_size)) {
2000 be_rx_compl_discard(rxo, rxcp);
2001 goto loop_continue;
2002 }
2003
2004 /* On BE, drop pkts that arrive due to imperfect filtering in
2005 * promiscuous mode on some SKUs
2006 */
2007 if (unlikely(rxcp->port != adapter->port_num &&
2008 !lancer_chip(adapter))) {
2009 be_rx_compl_discard(rxo, rxcp);
2010 goto loop_continue;
2011 }
2012
2013 if (do_gro(rxcp))
2014 be_rx_compl_process_gro(rxo, napi, rxcp);
2015 else
2016 be_rx_compl_process(rxo, rxcp);
2017 loop_continue:
2018 be_rx_stats_update(rxo, rxcp);
2019 }
2020
2021 if (work_done) {
2022 be_cq_notify(adapter, rx_cq->id, true, work_done);
2023
2024 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2025 be_post_rx_frags(rxo, GFP_ATOMIC);
2026 }
2027
2028 return work_done;
2029 }
2030
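/* Reap up to 'budget' TX completions, free the corresponding wrbs and wake
 * the TX subqueue if it was stopped; returns true when the CQ was drained
 * within the budget.
 */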
2031 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2032 int budget, int idx)
2033 {
2034 struct be_eth_tx_compl *txcp;
2035 int num_wrbs = 0, work_done;
2036
2037 for (work_done = 0; work_done < budget; work_done++) {
2038 txcp = be_tx_compl_get(&txo->cq);
2039 if (!txcp)
2040 break;
2041 num_wrbs += be_tx_compl_process(adapter, txo,
2042 AMAP_GET_BITS(struct amap_eth_tx_compl,
2043 wrb_index, txcp));
2044 }
2045
2046 if (work_done) {
2047 be_cq_notify(adapter, txo->cq.id, true, work_done);
2048 atomic_sub(num_wrbs, &txo->q.used);
2049
2050 /* As Tx wrbs have been freed up, wake up netdev queue
2051 * if it was stopped due to lack of tx wrbs. */
2052 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2053 atomic_read(&txo->q.used) < txo->q.len / 2) {
2054 netif_wake_subqueue(adapter->netdev, idx);
2055 }
2056
2057 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2058 tx_stats(txo)->tx_compl += work_done;
2059 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2060 }
2061 return (work_done < budget); /* Done */
2062 }
2063
2064 int be_poll(struct napi_struct *napi, int budget)
2065 {
2066 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067 struct be_adapter *adapter = eqo->adapter;
2068 int max_work = 0, work, i;
2069 bool tx_done;
2070
2071 /* Process all TXQs serviced by this EQ */
2072 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2074 eqo->tx_budget, i);
2075 if (!tx_done)
2076 max_work = budget;
2077 }
2078
2079 /* This loop iterates twice for EQ0, which also processes the
2080 * completions of the last RXQ (the default one).
2081 * For other EQs the loop iterates only once.
2082 */
2083 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2084 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2085 max_work = max(work, max_work);
2086 }
2087
2088 if (is_mcc_eqo(eqo))
2089 be_process_mcc(adapter);
2090
2091 if (max_work < budget) {
2092 napi_complete(napi);
2093 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2094 } else {
2095 /* As we'll continue in polling mode, count and clear events */
2096 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2097 }
2098 return max_work;
2099 }
2100
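/* Check the SLIPORT status registers (Lancer) or the unmasked UE status
 * bits (BE) and latch adapter->hw_error when the card reports an error.
 */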
2101 void be_detect_error(struct be_adapter *adapter)
2102 {
2103 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2104 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2105 u32 i;
2106
2107 if (be_crit_error(adapter))
2108 return;
2109
2110 if (lancer_chip(adapter)) {
2111 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2112 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2113 sliport_err1 = ioread32(adapter->db +
2114 SLIPORT_ERROR1_OFFSET);
2115 sliport_err2 = ioread32(adapter->db +
2116 SLIPORT_ERROR2_OFFSET);
2117 }
2118 } else {
2119 pci_read_config_dword(adapter->pdev,
2120 PCICFG_UE_STATUS_LOW, &ue_lo);
2121 pci_read_config_dword(adapter->pdev,
2122 PCICFG_UE_STATUS_HIGH, &ue_hi);
2123 pci_read_config_dword(adapter->pdev,
2124 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2125 pci_read_config_dword(adapter->pdev,
2126 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2127
2128 ue_lo = (ue_lo & ~ue_lo_mask);
2129 ue_hi = (ue_hi & ~ue_hi_mask);
2130 }
2131
2132 if (ue_lo || ue_hi ||
2133 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2134 adapter->hw_error = true;
2135 dev_err(&adapter->pdev->dev,
2136 "Error detected in the card\n");
2137 }
2138
2139 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2140 dev_err(&adapter->pdev->dev,
2141 "ERR: sliport status 0x%x\n", sliport_status);
2142 dev_err(&adapter->pdev->dev,
2143 "ERR: sliport error1 0x%x\n", sliport_err1);
2144 dev_err(&adapter->pdev->dev,
2145 "ERR: sliport error2 0x%x\n", sliport_err2);
2146 }
2147
2148 if (ue_lo) {
2149 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2150 if (ue_lo & 1)
2151 dev_err(&adapter->pdev->dev,
2152 "UE: %s bit set\n", ue_status_low_desc[i]);
2153 }
2154 }
2155
2156 if (ue_hi) {
2157 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2158 if (ue_hi & 1)
2159 dev_err(&adapter->pdev->dev,
2160 "UE: %s bit set\n", ue_status_hi_desc[i]);
2161 }
2162 }
2163
2164 }
2165
2166 static void be_msix_disable(struct be_adapter *adapter)
2167 {
2168 if (msix_enabled(adapter)) {
2169 pci_disable_msix(adapter->pdev);
2170 adapter->num_msix_vec = 0;
2171 }
2172 }
2173
2174 static uint be_num_rss_want(struct be_adapter *adapter)
2175 {
2176 u32 num = 0;
2177 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2178 !sriov_want(adapter) && be_physfn(adapter)) {
2179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2181 }
2182 return num;
2183 }
2184
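/* Request one MSI-X vector per desired RSS queue (plus RoCE vectors when
 * supported); if fewer vectors are granted, retry with the granted count.
 */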
2185 static void be_msix_enable(struct be_adapter *adapter)
2186 {
2187 #define BE_MIN_MSIX_VECTORS 1
2188 int i, status, num_vec, num_roce_vec = 0;
2189
2190 /* If RSS queues are not used, need a vec for default RX Q */
2191 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2192 if (be_roce_supported(adapter)) {
2193 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2194 (num_online_cpus() + 1));
2195 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2196 num_vec += num_roce_vec;
2197 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2198 }
2199 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2200
2201 for (i = 0; i < num_vec; i++)
2202 adapter->msix_entries[i].entry = i;
2203
2204 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2205 if (status == 0) {
2206 goto done;
2207 } else if (status >= BE_MIN_MSIX_VECTORS) {
2208 num_vec = status;
2209 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2210 num_vec) == 0)
2211 goto done;
2212 }
2213 return;
2214 done:
2215 if (be_roce_supported(adapter)) {
2216 if (num_vec > num_roce_vec) {
2217 adapter->num_msix_vec = num_vec - num_roce_vec;
2218 adapter->num_msix_roce_vec =
2219 num_vec - adapter->num_msix_vec;
2220 } else {
2221 adapter->num_msix_vec = num_vec;
2222 adapter->num_msix_roce_vec = 0;
2223 }
2224 } else
2225 adapter->num_msix_vec = num_vec;
2226 return;
2227 }
2228
2229 static inline int be_msix_vec_get(struct be_adapter *adapter,
2230 struct be_eq_obj *eqo)
2231 {
2232 return adapter->msix_entries[eqo->idx].vector;
2233 }
2234
2235 static int be_msix_register(struct be_adapter *adapter)
2236 {
2237 struct net_device *netdev = adapter->netdev;
2238 struct be_eq_obj *eqo;
2239 int status, i, vec;
2240
2241 for_all_evt_queues(adapter, eqo, i) {
2242 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2243 vec = be_msix_vec_get(adapter, eqo);
2244 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2245 if (status)
2246 goto err_msix;
2247 }
2248
2249 return 0;
2250 err_msix:
2251 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2252 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2253 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2254 status);
2255 be_msix_disable(adapter);
2256 return status;
2257 }
2258
2259 static int be_irq_register(struct be_adapter *adapter)
2260 {
2261 struct net_device *netdev = adapter->netdev;
2262 int status;
2263
2264 if (msix_enabled(adapter)) {
2265 status = be_msix_register(adapter);
2266 if (status == 0)
2267 goto done;
2268 /* INTx is not supported for VF */
2269 if (!be_physfn(adapter))
2270 return status;
2271 }
2272
2273 /* INTx */
2274 netdev->irq = adapter->pdev->irq;
2275 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2276 adapter);
2277 if (status) {
2278 dev_err(&adapter->pdev->dev,
2279 "INTx request IRQ failed - err %d\n", status);
2280 return status;
2281 }
2282 done:
2283 adapter->isr_registered = true;
2284 return 0;
2285 }
2286
2287 static void be_irq_unregister(struct be_adapter *adapter)
2288 {
2289 struct net_device *netdev = adapter->netdev;
2290 struct be_eq_obj *eqo;
2291 int i;
2292
2293 if (!adapter->isr_registered)
2294 return;
2295
2296 /* INTx */
2297 if (!msix_enabled(adapter)) {
2298 free_irq(netdev->irq, adapter);
2299 goto done;
2300 }
2301
2302 /* MSIx */
2303 for_all_evt_queues(adapter, eqo, i)
2304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2305
2306 done:
2307 adapter->isr_registered = false;
2308 }
2309
2310 static void be_rx_qs_destroy(struct be_adapter *adapter)
2311 {
2312 struct be_queue_info *q;
2313 struct be_rx_obj *rxo;
2314 int i;
2315
2316 for_all_rx_queues(adapter, rxo, i) {
2317 q = &rxo->q;
2318 if (q->created) {
2319 be_cmd_rxq_destroy(adapter, q);
2320 /* After the rxq is invalidated, wait for a grace time
2321 * of 1ms for all DMA to end and the flush compl to
2322 * arrive
2323 */
2324 mdelay(1);
2325 be_rx_cq_clean(rxo);
2326 }
2327 be_queue_free(adapter, q);
2328 }
2329 }
2330
2331 static int be_close(struct net_device *netdev)
2332 {
2333 struct be_adapter *adapter = netdev_priv(netdev);
2334 struct be_eq_obj *eqo;
2335 int i;
2336
2337 be_roce_dev_close(adapter);
2338
2339 be_async_mcc_disable(adapter);
2340
2341 if (!lancer_chip(adapter))
2342 be_intr_set(adapter, false);
2343
2344 for_all_evt_queues(adapter, eqo, i) {
2345 napi_disable(&eqo->napi);
2346 if (msix_enabled(adapter))
2347 synchronize_irq(be_msix_vec_get(adapter, eqo));
2348 else
2349 synchronize_irq(netdev->irq);
2350 be_eq_clean(eqo);
2351 }
2352
2353 be_irq_unregister(adapter);
2354
2355 /* Wait for all pending tx completions to arrive so that
2356 * all tx skbs are freed.
2357 */
2358 be_tx_compl_clean(adapter);
2359
2360 be_rx_qs_destroy(adapter);
2361 return 0;
2362 }
2363
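/* Create the default (non-RSS) RXQ first as the FW expects, then the RSS
 * RXQs, program the 128-entry RSS indirection table and post initial frags.
 */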
2364 static int be_rx_qs_create(struct be_adapter *adapter)
2365 {
2366 struct be_rx_obj *rxo;
2367 int rc, i, j;
2368 u8 rsstable[128];
2369
2370 for_all_rx_queues(adapter, rxo, i) {
2371 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2372 sizeof(struct be_eth_rx_d));
2373 if (rc)
2374 return rc;
2375 }
2376
2377 /* The FW would like the default RXQ to be created first */
2378 rxo = default_rxo(adapter);
2379 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2380 adapter->if_handle, false, &rxo->rss_id);
2381 if (rc)
2382 return rc;
2383
2384 for_all_rss_queues(adapter, rxo, i) {
2385 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2386 rx_frag_size, adapter->if_handle,
2387 true, &rxo->rss_id);
2388 if (rc)
2389 return rc;
2390 }
2391
2392 if (be_multi_rxq(adapter)) {
2393 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2394 for_all_rss_queues(adapter, rxo, i) {
2395 if ((j + i) >= 128)
2396 break;
2397 rsstable[j + i] = rxo->rss_id;
2398 }
2399 }
2400 rc = be_cmd_rss_config(adapter, rsstable, 128);
2401 if (rc)
2402 return rc;
2403 }
2404
2405 /* First time posting */
2406 for_all_rx_queues(adapter, rxo, i)
2407 be_post_rx_frags(rxo, GFP_KERNEL);
2408 return 0;
2409 }
2410
2411 static int be_open(struct net_device *netdev)
2412 {
2413 struct be_adapter *adapter = netdev_priv(netdev);
2414 struct be_eq_obj *eqo;
2415 struct be_rx_obj *rxo;
2416 struct be_tx_obj *txo;
2417 u8 link_status;
2418 int status, i;
2419
2420 status = be_rx_qs_create(adapter);
2421 if (status)
2422 goto err;
2423
2424 be_irq_register(adapter);
2425
2426 if (!lancer_chip(adapter))
2427 be_intr_set(adapter, true);
2428
2429 for_all_rx_queues(adapter, rxo, i)
2430 be_cq_notify(adapter, rxo->cq.id, true, 0);
2431
2432 for_all_tx_queues(adapter, txo, i)
2433 be_cq_notify(adapter, txo->cq.id, true, 0);
2434
2435 be_async_mcc_enable(adapter);
2436
2437 for_all_evt_queues(adapter, eqo, i) {
2438 napi_enable(&eqo->napi);
2439 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2440 }
2441
2442 status = be_cmd_link_status_query(adapter, NULL, NULL,
2443 &link_status, 0);
2444 if (!status)
2445 be_link_status_update(adapter, link_status);
2446
2447 be_roce_dev_open(adapter);
2448 return 0;
2449 err:
2450 be_close(adapter->netdev);
2451 return -EIO;
2452 }
2453
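/* Enable or disable magic-packet wake-on-LAN: program the WoL MAC via a FW
 * command and set the PCI D3hot/D3cold wake state accordingly.
 */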
2454 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2455 {
2456 struct be_dma_mem cmd;
2457 int status = 0;
2458 u8 mac[ETH_ALEN];
2459
2460 memset(mac, 0, ETH_ALEN);
2461
2462 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2463 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2464 GFP_KERNEL);
2465 if (cmd.va == NULL)
2466 return -1;
2467 memset(cmd.va, 0, cmd.size);
2468
2469 if (enable) {
2470 status = pci_write_config_dword(adapter->pdev,
2471 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2472 if (status) {
2473 dev_err(&adapter->pdev->dev,
2474 "Could not enable Wake-on-lan\n");
2475 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2476 cmd.dma);
2477 return status;
2478 }
2479 status = be_cmd_enable_magic_wol(adapter,
2480 adapter->netdev->dev_addr, &cmd);
2481 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2482 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2483 } else {
2484 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2485 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2486 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2487 }
2488
2489 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2490 return status;
2491 }
2492
2493 /*
2494 * Generate a seed MAC address from the PF MAC Address using jhash.
2495 * MAC addresses for VFs are assigned incrementally starting from the seed.
2496 * These addresses are programmed in the ASIC by the PF and the VF driver
2497 * queries for the MAC address during its probe.
2498 */
2499 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2500 {
2501 u32 vf;
2502 int status = 0;
2503 u8 mac[ETH_ALEN];
2504 struct be_vf_cfg *vf_cfg;
2505
2506 be_vf_eth_addr_generate(adapter, mac);
2507
2508 for_all_vfs(adapter, vf_cfg, vf) {
2509 if (lancer_chip(adapter)) {
2510 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2511 } else {
2512 status = be_cmd_pmac_add(adapter, mac,
2513 vf_cfg->if_handle,
2514 &vf_cfg->pmac_id, vf + 1);
2515 }
2516
2517 if (status)
2518 dev_err(&adapter->pdev->dev,
2519 "Mac address assignment failed for VF %d\n", vf);
2520 else
2521 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2522
2523 mac[5] += 1;
2524 }
2525 return status;
2526 }
2527
2528 static void be_vf_clear(struct be_adapter *adapter)
2529 {
2530 struct be_vf_cfg *vf_cfg;
2531 u32 vf;
2532
2533 if (be_find_vfs(adapter, ASSIGNED)) {
2534 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2535 goto done;
2536 }
2537
2538 for_all_vfs(adapter, vf_cfg, vf) {
2539 if (lancer_chip(adapter))
2540 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2541 else
2542 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2543 vf_cfg->pmac_id, vf + 1);
2544
2545 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2546 }
2547 pci_disable_sriov(adapter->pdev);
2548 done:
2549 kfree(adapter->vf_cfg);
2550 adapter->num_vfs = 0;
2551 }
2552
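/* Undo be_setup(): stop the worker, clear VFs, delete extra unicast MACs,
 * destroy the interface and all queues, and disable MSI-X.
 */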
2553 static int be_clear(struct be_adapter *adapter)
2554 {
2555 int i = 1;
2556
2557 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2558 cancel_delayed_work_sync(&adapter->work);
2559 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2560 }
2561
2562 if (sriov_enabled(adapter))
2563 be_vf_clear(adapter);
2564
2565 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2566 be_cmd_pmac_del(adapter, adapter->if_handle,
2567 adapter->pmac_id[i], 0);
2568
2569 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2570
2571 be_mcc_queues_destroy(adapter);
2572 be_rx_cqs_destroy(adapter);
2573 be_tx_queues_destroy(adapter);
2574 be_evt_queues_destroy(adapter);
2575
2576 be_msix_disable(adapter);
2577 return 0;
2578 }
2579
2580 static int be_vf_setup_init(struct be_adapter *adapter)
2581 {
2582 struct be_vf_cfg *vf_cfg;
2583 int vf;
2584
2585 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2586 GFP_KERNEL);
2587 if (!adapter->vf_cfg)
2588 return -ENOMEM;
2589
2590 for_all_vfs(adapter, vf_cfg, vf) {
2591 vf_cfg->if_handle = -1;
2592 vf_cfg->pmac_id = -1;
2593 }
2594 return 0;
2595 }
2596
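/* Enable SR-IOV (capped at the device limit), create an interface per VF,
 * assign VF MAC addresses, program per-VF QoS and cache each VF's
 * default VLAN.
 */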
2597 static int be_vf_setup(struct be_adapter *adapter)
2598 {
2599 struct be_vf_cfg *vf_cfg;
2600 struct device *dev = &adapter->pdev->dev;
2601 u32 cap_flags, en_flags, vf;
2602 u16 def_vlan, lnk_speed;
2603 int status, enabled_vfs;
2604
2605 enabled_vfs = be_find_vfs(adapter, ENABLED);
2606 if (enabled_vfs) {
2607 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2608 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2609 return 0;
2610 }
2611
2612 if (num_vfs > adapter->dev_num_vfs) {
2613 dev_warn(dev, "Device supports %d VFs and not %d\n",
2614 adapter->dev_num_vfs, num_vfs);
2615 num_vfs = adapter->dev_num_vfs;
2616 }
2617
2618 status = pci_enable_sriov(adapter->pdev, num_vfs);
2619 if (!status) {
2620 adapter->num_vfs = num_vfs;
2621 } else {
2622 /* Platform doesn't support SRIOV though device supports it */
2623 dev_warn(dev, "SRIOV enable failed\n");
2624 return 0;
2625 }
2626
2627 status = be_vf_setup_init(adapter);
2628 if (status)
2629 goto err;
2630
2631 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2632 BE_IF_FLAGS_MULTICAST;
2633 for_all_vfs(adapter, vf_cfg, vf) {
2634 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2635 &vf_cfg->if_handle, vf + 1);
2636 if (status)
2637 goto err;
2638 }
2639
2640 if (!enabled_vfs) {
2641 status = be_vf_eth_addr_config(adapter);
2642 if (status)
2643 goto err;
2644 }
2645
2646 for_all_vfs(adapter, vf_cfg, vf) {
2647 lnk_speed = 1000;
2648 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2649 if (status)
2650 goto err;
2651 vf_cfg->tx_rate = lnk_speed * 10;
2652
2653 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2654 vf + 1, vf_cfg->if_handle);
2655 if (status)
2656 goto err;
2657 vf_cfg->def_vid = def_vlan;
2658 }
2659 return 0;
2660 err:
2661 return status;
2662 }
2663
2664 static void be_setup_init(struct be_adapter *adapter)
2665 {
2666 adapter->vlan_prio_bmap = 0xff;
2667 adapter->phy.link_speed = -1;
2668 adapter->if_handle = -1;
2669 adapter->be3_native = false;
2670 adapter->promiscuous = false;
2671 adapter->eq_next_idx = 0;
2672 adapter->phy.forced_port_speed = -1;
2673 }
2674
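/* Select the MAC to program: reuse the current address if perm_addr is
 * already set; otherwise query the FW MAC list (Lancer), the permanent MAC
 * (BE3 PF) or the PF-provisioned MAC (BE3 VF).
 */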
2675 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2676 bool *active_mac, u32 *pmac_id)
2677 {
2678 int status = 0;
2679
2680 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2681 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2682 if (!lancer_chip(adapter) && !be_physfn(adapter))
2683 *active_mac = true;
2684 else
2685 *active_mac = false;
2686
2687 return status;
2688 }
2689
2690 if (lancer_chip(adapter)) {
2691 status = be_cmd_get_mac_from_list(adapter, mac,
2692 active_mac, pmac_id, 0);
2693 if (*active_mac) {
2694 status = be_cmd_mac_addr_query(adapter, mac,
2695 MAC_ADDRESS_TYPE_NETWORK,
2696 false, if_handle,
2697 *pmac_id);
2698 }
2699 } else if (be_physfn(adapter)) {
2700 /* For BE3, for PF get permanent MAC */
2701 status = be_cmd_mac_addr_query(adapter, mac,
2702 MAC_ADDRESS_TYPE_NETWORK, true,
2703 0, 0);
2704 *active_mac = false;
2705 } else {
2706 /* For BE3, for VF get the soft MAC assigned by the PF */
2707 status = be_cmd_mac_addr_query(adapter, mac,
2708 MAC_ADDRESS_TYPE_NETWORK, false,
2709 if_handle, 0);
2710 *active_mac = true;
2711 }
2712 return status;
2713 }
2714
2715 /* Routine to query per function resource limits */
2716 static int be_get_config(struct be_adapter *adapter)
2717 {
2718 int pos;
2719 u16 dev_num_vfs;
2720
2721 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2722 if (pos) {
2723 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2724 &dev_num_vfs);
2725 if (!lancer_chip(adapter))
2726 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727 adapter->dev_num_vfs = dev_num_vfs;
2728 }
2729 return 0;
2730 }
2731
2732 static int be_setup(struct be_adapter *adapter)
2733 {
2734 struct device *dev = &adapter->pdev->dev;
2735 u32 cap_flags, en_flags;
2736 u32 tx_fc, rx_fc;
2737 int status;
2738 u8 mac[ETH_ALEN];
2739 bool active_mac;
2740
2741 be_setup_init(adapter);
2742
2743 be_get_config(adapter);
2744
2745 be_cmd_req_native_mode(adapter);
2746
2747 be_msix_enable(adapter);
2748
2749 status = be_evt_queues_create(adapter);
2750 if (status)
2751 goto err;
2752
2753 status = be_tx_cqs_create(adapter);
2754 if (status)
2755 goto err;
2756
2757 status = be_rx_cqs_create(adapter);
2758 if (status)
2759 goto err;
2760
2761 status = be_mcc_queues_create(adapter);
2762 if (status)
2763 goto err;
2764
2765 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2769
2770 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2771 cap_flags |= BE_IF_FLAGS_RSS;
2772 en_flags |= BE_IF_FLAGS_RSS;
2773 }
2774
2775 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2776 en_flags = BE_IF_FLAGS_UNTAGGED |
2777 BE_IF_FLAGS_BROADCAST |
2778 BE_IF_FLAGS_MULTICAST;
2779 cap_flags = en_flags;
2780 }
2781
2782 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2783 &adapter->if_handle, 0);
2784 if (status != 0)
2785 goto err;
2786
2787 memset(mac, 0, ETH_ALEN);
2788 active_mac = false;
2789 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2790 &active_mac, &adapter->pmac_id[0]);
2791 if (status != 0)
2792 goto err;
2793
2794 if (!active_mac) {
2795 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2796 &adapter->pmac_id[0], 0);
2797 if (status != 0)
2798 goto err;
2799 }
2800
2801 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2802 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2803 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2804 }
2805
2806 status = be_tx_qs_create(adapter);
2807 if (status)
2808 goto err;
2809
2810 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2811
2812 if (adapter->vlans_added)
2813 be_vid_config(adapter);
2814
2815 be_set_rx_mode(adapter->netdev);
2816
2817 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2818
2819 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2820 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2821 adapter->rx_fc);
2822
2823 if (be_physfn(adapter) && num_vfs) {
2824 if (adapter->dev_num_vfs)
2825 be_vf_setup(adapter);
2826 else
2827 dev_warn(dev, "device doesn't support SRIOV\n");
2828 }
2829
2830 be_cmd_get_phy_info(adapter);
2831 if (be_pause_supported(adapter))
2832 adapter->phy.fc_autoneg = 1;
2833
2834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2835 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2836 return 0;
2837 err:
2838 be_clear(adapter);
2839 return status;
2840 }
2841
2842 #ifdef CONFIG_NET_POLL_CONTROLLER
2843 static void be_netpoll(struct net_device *netdev)
2844 {
2845 struct be_adapter *adapter = netdev_priv(netdev);
2846 struct be_eq_obj *eqo;
2847 int i;
2848
2849 for_all_evt_queues(adapter, eqo, i)
2850 event_handle(eqo);
2851
2852 return;
2853 }
2854 #endif
2855
2856 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2857 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2858
2859 static bool be_flash_redboot(struct be_adapter *adapter,
2860 const u8 *p, u32 img_start, int image_size,
2861 int hdr_size)
2862 {
2863 u32 crc_offset;
2864 u8 flashed_crc[4];
2865 int status;
2866
2867 crc_offset = hdr_size + img_start + image_size - 4;
2868
2869 p += crc_offset;
2870
2871 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2872 (image_size - 4));
2873 if (status) {
2874 dev_err(&adapter->pdev->dev,
2875 "could not get crc from flash, not flashing redboot\n");
2876 return false;
2877 }
2878
2879 /* Update redboot only if the CRC does not match */
2880 if (!memcmp(flashed_crc, p, 4))
2881 return false;
2882 else
2883 return true;
2884 }
2885
2886 static bool phy_flashing_required(struct be_adapter *adapter)
2887 {
2888 return (adapter->phy.phy_type == TN_8022 &&
2889 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2890 }
2891
2892 static bool is_comp_in_ufi(struct be_adapter *adapter,
2893 struct flash_section_info *fsec, int type)
2894 {
2895 int i = 0, img_type = 0;
2896 struct flash_section_info_g2 *fsec_g2 = NULL;
2897
2898 if (adapter->generation != BE_GEN3)
2899 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900
2901 for (i = 0; i < MAX_FLASH_COMP; i++) {
2902 if (fsec_g2)
2903 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2904 else
2905 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2906
2907 if (img_type == type)
2908 return true;
2909 }
2910 return false;
2911
2912 }
2913
2914 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2915 int header_size,
2916 const struct firmware *fw)
2917 {
2918 struct flash_section_info *fsec = NULL;
2919 const u8 *p = fw->data;
2920
2921 p += header_size;
2922 while (p < (fw->data + fw->size)) {
2923 fsec = (struct flash_section_info *)p;
2924 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2925 return fsec;
2926 p += 32;
2927 }
2928 return NULL;
2929 }
2930
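/* Walk the gen2/gen3 flash component table, skip components absent from the
 * UFI section info, and write each component to flash in 32KB chunks.
 */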
2931 static int be_flash_data(struct be_adapter *adapter,
2932 const struct firmware *fw,
2933 struct be_dma_mem *flash_cmd,
2934 int num_of_images)
2935
2936 {
2937 int status = 0, i, filehdr_size = 0;
2938 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939 u32 total_bytes = 0, flash_op;
2940 int num_bytes;
2941 const u8 *p = fw->data;
2942 struct be_cmd_write_flashrom *req = flash_cmd->va;
2943 const struct flash_comp *pflashcomp;
2944 int num_comp, hdr_size;
2945 struct flash_section_info *fsec = NULL;
2946
2947 struct flash_comp gen3_flash_types[] = {
2948 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2949 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2950 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2951 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2952 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2953 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2954 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2955 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2956 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2957 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2958 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2959 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2960 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2961 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2962 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2963 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2964 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2965 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2966 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2967 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2968 };
2969
2970 struct flash_comp gen2_flash_types[] = {
2971 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2972 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2973 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2974 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2975 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2976 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2977 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2978 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2979 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2980 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2981 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2982 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2983 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2984 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2985 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2986 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2987 };
2988
2989 if (adapter->generation == BE_GEN3) {
2990 pflashcomp = gen3_flash_types;
2991 filehdr_size = sizeof(struct flash_file_hdr_g3);
2992 num_comp = ARRAY_SIZE(gen3_flash_types);
2993 } else {
2994 pflashcomp = gen2_flash_types;
2995 filehdr_size = sizeof(struct flash_file_hdr_g2);
2996 num_comp = ARRAY_SIZE(gen2_flash_types);
2997 }
2998 /* Get flash section info */
2999 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3000 if (!fsec) {
3001 dev_err(&adapter->pdev->dev,
3002 "Invalid Cookie. UFI corrupted ?\n");
3003 return -1;
3004 }
3005 for (i = 0; i < num_comp; i++) {
3006 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3007 continue;
3008
3009 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3010 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3011 continue;
3012
3013 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
3014 if (!phy_flashing_required(adapter))
3015 continue;
3016 }
3017
3018 hdr_size = filehdr_size +
3019 (num_of_images * sizeof(struct image_hdr));
3020
3021 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3022 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3023 pflashcomp[i].size, hdr_size)))
3024 continue;
3025
3026 /* Flash the component */
3027 p = fw->data;
3028 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029 if (p + pflashcomp[i].size > fw->data + fw->size)
3030 return -1;
3031 total_bytes = pflashcomp[i].size;
3032 while (total_bytes) {
3033 if (total_bytes > 32*1024)
3034 num_bytes = 32*1024;
3035 else
3036 num_bytes = total_bytes;
3037 total_bytes -= num_bytes;
3038 if (!total_bytes) {
3039 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040 flash_op = FLASHROM_OPER_PHY_FLASH;
3041 else
3042 flash_op = FLASHROM_OPER_FLASH;
3043 } else {
3044 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045 flash_op = FLASHROM_OPER_PHY_SAVE;
3046 else
3047 flash_op = FLASHROM_OPER_SAVE;
3048 }
3049 memcpy(req->params.data_buf, p, num_bytes);
3050 p += num_bytes;
3051 status = be_cmd_write_flashrom(adapter, flash_cmd,
3052 pflashcomp[i].optype, flash_op, num_bytes);
3053 if (status) {
3054 if ((status == ILLEGAL_IOCTL_REQ) &&
3055 (pflashcomp[i].optype ==
3056 OPTYPE_PHY_FW))
3057 break;
3058 dev_err(&adapter->pdev->dev,
3059 "cmd to write to flash rom failed.\n");
3060 return -1;
3061 }
3062 }
3063 }
3064 return 0;
3065 }
3066
3067 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3068 {
3069 if (fhdr == NULL)
3070 return 0;
3071 if (fhdr->build[0] == '3')
3072 return BE_GEN3;
3073 else if (fhdr->build[0] == '2')
3074 return BE_GEN2;
3075 else
3076 return 0;
3077 }
3078
3079 static int lancer_wait_idle(struct be_adapter *adapter)
3080 {
3081 #define SLIPORT_IDLE_TIMEOUT 30
3082 u32 reg_val;
3083 int status = 0, i;
3084
3085 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3088 break;
3089
3090 ssleep(1);
3091 }
3092
3093 if (i == SLIPORT_IDLE_TIMEOUT)
3094 status = -1;
3095
3096 return status;
3097 }
3098
3099 static int lancer_fw_reset(struct be_adapter *adapter)
3100 {
3101 int status = 0;
3102
3103 status = lancer_wait_idle(adapter);
3104 if (status)
3105 return status;
3106
3107 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108 PHYSDEV_CONTROL_OFFSET);
3109
3110 return status;
3111 }
3112
3113 static int lancer_fw_download(struct be_adapter *adapter,
3114 const struct firmware *fw)
3115 {
3116 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3117 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3118 struct be_dma_mem flash_cmd;
3119 const u8 *data_ptr = NULL;
3120 u8 *dest_image_ptr = NULL;
3121 size_t image_size = 0;
3122 u32 chunk_size = 0;
3123 u32 data_written = 0;
3124 u32 offset = 0;
3125 int status = 0;
3126 u8 add_status = 0;
3127 u8 change_status;
3128
3129 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3130 dev_err(&adapter->pdev->dev,
3131 "FW Image not properly aligned. "
3132 "Length must be 4 byte aligned.\n");
3133 status = -EINVAL;
3134 goto lancer_fw_exit;
3135 }
3136
3137 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3138 + LANCER_FW_DOWNLOAD_CHUNK;
3139 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3140 &flash_cmd.dma, GFP_KERNEL);
3141 if (!flash_cmd.va) {
3142 status = -ENOMEM;
3143 dev_err(&adapter->pdev->dev,
3144 "Memory allocation failure while flashing\n");
3145 goto lancer_fw_exit;
3146 }
3147
3148 dest_image_ptr = flash_cmd.va +
3149 sizeof(struct lancer_cmd_req_write_object);
3150 image_size = fw->size;
3151 data_ptr = fw->data;
3152
3153 while (image_size) {
3154 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3155
3156 /* Copy the image chunk content. */
3157 memcpy(dest_image_ptr, data_ptr, chunk_size);
3158
3159 status = lancer_cmd_write_object(adapter, &flash_cmd,
3160 chunk_size, offset,
3161 LANCER_FW_DOWNLOAD_LOCATION,
3162 &data_written, &change_status,
3163 &add_status);
3164 if (status)
3165 break;
3166
3167 offset += data_written;
3168 data_ptr += data_written;
3169 image_size -= data_written;
3170 }
3171
3172 if (!status) {
3173 /* Commit the FW that was written */
3174 status = lancer_cmd_write_object(adapter, &flash_cmd,
3175 0, offset,
3176 LANCER_FW_DOWNLOAD_LOCATION,
3177 &data_written, &change_status,
3178 &add_status);
3179 }
3180
3181 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3182 flash_cmd.dma);
3183 if (status) {
3184 dev_err(&adapter->pdev->dev,
3185 "Firmware load error. "
3186 "Status code: 0x%x Additional Status: 0x%x\n",
3187 status, add_status);
3188 goto lancer_fw_exit;
3189 }
3190
3191 if (change_status == LANCER_FW_RESET_NEEDED) {
3192 status = lancer_fw_reset(adapter);
3193 if (status) {
3194 dev_err(&adapter->pdev->dev,
3195 "Adapter busy for FW reset.\n"
3196 "New FW will not be active.\n");
3197 goto lancer_fw_exit;
3198 }
3199 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3200 dev_err(&adapter->pdev->dev,
3201 "System reboot required for new FW"
3202 " to be active\n");
3203 }
3204
3205 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3206 lancer_fw_exit:
3207 return status;
3208 }
3209
3210 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3211 {
3212 struct flash_file_hdr_g2 *fhdr;
3213 struct flash_file_hdr_g3 *fhdr3;
3214 struct image_hdr *img_hdr_ptr = NULL;
3215 struct be_dma_mem flash_cmd;
3216 const u8 *p;
3217 int status = 0, i = 0, num_imgs = 0;
3218
3219 p = fw->data;
3220 fhdr = (struct flash_file_hdr_g2 *) p;
3221
3222 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3223 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 &flash_cmd.dma, GFP_KERNEL);
3225 if (!flash_cmd.va) {
3226 status = -ENOMEM;
3227 dev_err(&adapter->pdev->dev,
3228 "Memory allocation failure while flashing\n");
3229 goto be_fw_exit;
3230 }
3231
3232 if ((adapter->generation == BE_GEN3) &&
3233 (get_ufigen_type(fhdr) == BE_GEN3)) {
3234 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3235 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3236 for (i = 0; i < num_imgs; i++) {
3237 img_hdr_ptr = (struct image_hdr *) (fw->data +
3238 (sizeof(struct flash_file_hdr_g3) +
3239 i * sizeof(struct image_hdr)));
3240 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3241 status = be_flash_data(adapter, fw, &flash_cmd,
3242 num_imgs);
3243 }
3244 } else if ((adapter->generation == BE_GEN2) &&
3245 (get_ufigen_type(fhdr) == BE_GEN2)) {
3246 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247 } else {
3248 dev_err(&adapter->pdev->dev,
3249 "UFI and Interface are not compatible for flashing\n");
3250 status = -1;
3251 }
3252
3253 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254 flash_cmd.dma);
3255 if (status) {
3256 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3257 goto be_fw_exit;
3258 }
3259
3260 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3261
3262 be_fw_exit:
3263 return status;
3264 }
3265
3266 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3267 {
3268 const struct firmware *fw;
3269 int status;
3270
3271 if (!netif_running(adapter->netdev)) {
3272 dev_err(&adapter->pdev->dev,
3273 "Firmware load not allowed (interface is down)\n");
3274 return -1;
3275 }
3276
3277 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3278 if (status)
3279 goto fw_exit;
3280
3281 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3282
3283 if (lancer_chip(adapter))
3284 status = lancer_fw_download(adapter, fw);
3285 else
3286 status = be_fw_download(adapter, fw);
3287
3288 fw_exit:
3289 release_firmware(fw);
3290 return status;
3291 }
3292
3293 static const struct net_device_ops be_netdev_ops = {
3294 .ndo_open = be_open,
3295 .ndo_stop = be_close,
3296 .ndo_start_xmit = be_xmit,
3297 .ndo_set_rx_mode = be_set_rx_mode,
3298 .ndo_set_mac_address = be_mac_addr_set,
3299 .ndo_change_mtu = be_change_mtu,
3300 .ndo_get_stats64 = be_get_stats64,
3301 .ndo_validate_addr = eth_validate_addr,
3302 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3303 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3304 .ndo_set_vf_mac = be_set_vf_mac,
3305 .ndo_set_vf_vlan = be_set_vf_vlan,
3306 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3307 .ndo_get_vf_config = be_get_vf_config,
3308 #ifdef CONFIG_NET_POLL_CONTROLLER
3309 .ndo_poll_controller = be_netpoll,
3310 #endif
3311 };
3312
3313 static void be_netdev_init(struct net_device *netdev)
3314 {
3315 struct be_adapter *adapter = netdev_priv(netdev);
3316 struct be_eq_obj *eqo;
3317 int i;
3318
3319 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3320 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3321 NETIF_F_HW_VLAN_TX;
3322 if (be_multi_rxq(adapter))
3323 netdev->hw_features |= NETIF_F_RXHASH;
3324
3325 netdev->features |= netdev->hw_features |
3326 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3327
3328 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3329 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3330
3331 netdev->priv_flags |= IFF_UNICAST_FLT;
3332
3333 netdev->flags |= IFF_MULTICAST;
3334
3335 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3336
3337 netdev->netdev_ops = &be_netdev_ops;
3338
3339 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3340
3341 for_all_evt_queues(adapter, eqo, i)
3342 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3343 }
3344
3345 static void be_unmap_pci_bars(struct be_adapter *adapter)
3346 {
3347 if (adapter->csr)
3348 iounmap(adapter->csr);
3349 if (adapter->db)
3350 iounmap(adapter->db);
3351 if (adapter->roce_db.base)
3352 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353 }
3354
3355 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3356 {
3357 struct pci_dev *pdev = adapter->pdev;
3358 u8 __iomem *addr;
3359
3360 addr = pci_iomap(pdev, 2, 0);
3361 if (addr == NULL)
3362 return -ENOMEM;
3363
3364 adapter->roce_db.base = addr;
3365 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3366 adapter->roce_db.size = 8192;
3367 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3368 return 0;
3369 }
3370
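/* Map the doorbell (and RoCE) BARs on Lancer, or the CSR and doorbell BARs
 * on BE2/BE3; the doorbell BAR number depends on the generation and on
 * whether this is a PF or a VF.
 */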
3371 static int be_map_pci_bars(struct be_adapter *adapter)
3372 {
3373 u8 __iomem *addr;
3374 int db_reg;
3375
3376 if (lancer_chip(adapter)) {
3377 if (be_type_2_3(adapter)) {
3378 addr = ioremap_nocache(
3379 pci_resource_start(adapter->pdev, 0),
3380 pci_resource_len(adapter->pdev, 0));
3381 if (addr == NULL)
3382 return -ENOMEM;
3383 adapter->db = addr;
3384 }
3385 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 if (lancer_roce_map_pci_bars(adapter))
3387 goto pci_map_err;
3388 }
3389 return 0;
3390 }
3391
3392 if (be_physfn(adapter)) {
3393 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 pci_resource_len(adapter->pdev, 2));
3395 if (addr == NULL)
3396 return -ENOMEM;
3397 adapter->csr = addr;
3398 }
3399
3400 if (adapter->generation == BE_GEN2) {
3401 db_reg = 4;
3402 } else {
3403 if (be_physfn(adapter))
3404 db_reg = 4;
3405 else
3406 db_reg = 0;
3407 }
3408 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 pci_resource_len(adapter->pdev, db_reg));
3410 if (addr == NULL)
3411 goto pci_map_err;
3412 adapter->db = addr;
3413 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3414 adapter->roce_db.size = 4096;
3415 adapter->roce_db.io_addr =
3416 pci_resource_start(adapter->pdev, db_reg);
3417 adapter->roce_db.total_size =
3418 pci_resource_len(adapter->pdev, db_reg);
3419 }
3420 return 0;
3421 pci_map_err:
3422 be_unmap_pci_bars(adapter);
3423 return -ENOMEM;
3424 }
3425
3426 static void be_ctrl_cleanup(struct be_adapter *adapter)
3427 {
3428 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3429
3430 be_unmap_pci_bars(adapter);
3431
3432 if (mem->va)
3433 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3434 mem->dma);
3435
3436 mem = &adapter->rx_filter;
3437 if (mem->va)
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma);
3440 kfree(adapter->pmac_id);
3441 }
3442
3443 static int be_ctrl_init(struct be_adapter *adapter)
3444 {
3445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3447 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3448 int status;
3449
3450 status = be_map_pci_bars(adapter);
3451 if (status)
3452 goto done;
3453
3454 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3455 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3456 mbox_mem_alloc->size,
3457 &mbox_mem_alloc->dma,
3458 GFP_KERNEL);
3459 if (!mbox_mem_alloc->va) {
3460 status = -ENOMEM;
3461 goto unmap_pci_bars;
3462 }
3463 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3464 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3465 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3466 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3467
3468 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3469 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3470 &rx_filter->dma, GFP_KERNEL);
3471 if (rx_filter->va == NULL) {
3472 status = -ENOMEM;
3473 goto free_mbox;
3474 }
3475 memset(rx_filter->va, 0, rx_filter->size);
3476
3477 /* primary mac needs 1 pmac entry */
3478 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3479 sizeof(*adapter->pmac_id), GFP_KERNEL);
3480 if (!adapter->pmac_id)
3481 return -ENOMEM;
3482
3483 mutex_init(&adapter->mbox_lock);
3484 spin_lock_init(&adapter->mcc_lock);
3485 spin_lock_init(&adapter->mcc_cq_lock);
3486
3487 init_completion(&adapter->flash_compl);
3488 pci_save_state(adapter->pdev);
3489 return 0;
3490
3491 free_mbox:
3492 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3493 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3494
3495 unmap_pci_bars:
3496 be_unmap_pci_bars(adapter);
3497
3498 done:
3499 return status;
3500 }
3501
3502 static void be_stats_cleanup(struct be_adapter *adapter)
3503 {
3504 struct be_dma_mem *cmd = &adapter->stats_cmd;
3505
3506 if (cmd->va)
3507 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3508 cmd->va, cmd->dma);
3509 }
3510
3511 static int be_stats_init(struct be_adapter *adapter)
3512 {
3513 struct be_dma_mem *cmd = &adapter->stats_cmd;
3514
3515 if (adapter->generation == BE_GEN2) {
3516 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3517 } else {
3518 if (lancer_chip(adapter))
3519 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3520 else
3521 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3522 }
3523 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3524 GFP_KERNEL);
3525 if (cmd->va == NULL)
3526 return -1;
3527 memset(cmd->va, 0, cmd->size);
3528 return 0;
3529 }
3530
3531 static void __devexit be_remove(struct pci_dev *pdev)
3532 {
3533 struct be_adapter *adapter = pci_get_drvdata(pdev);
3534
3535 if (!adapter)
3536 return;
3537
3538 be_roce_dev_remove(adapter);
3539
3540 cancel_delayed_work_sync(&adapter->func_recovery_work);
3541
3542 unregister_netdev(adapter->netdev);
3543
3544 be_clear(adapter);
3545
3546 /* tell fw we're done with firing cmds */
3547 be_cmd_fw_clean(adapter);
3548
3549 be_stats_cleanup(adapter);
3550
3551 be_ctrl_cleanup(adapter);
3552
3553 pci_disable_pcie_error_reporting(pdev);
3554
3555 pci_set_drvdata(pdev, NULL);
3556 pci_release_regions(pdev);
3557 pci_disable_device(pdev);
3558
3559 free_netdev(adapter->netdev);
3560 }
3561
3562 bool be_is_wol_supported(struct be_adapter *adapter)
3563 {
3564 return ((adapter->wol_cap & BE_WOL_CAP) &&
3565 !be_is_wol_excluded(adapter)) ? true : false;
3566 }
3567
3568 u32 be_get_fw_log_level(struct be_adapter *adapter)
3569 {
3570 struct be_dma_mem extfat_cmd;
3571 struct be_fat_conf_params *cfgs;
3572 int status;
3573 u32 level = 0;
3574 int j;
3575
3576 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3577 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3578 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3579 &extfat_cmd.dma);
3580
3581 if (!extfat_cmd.va) {
3582 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3583 __func__);
3584 goto err;
3585 }
3586
3587 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3588 if (!status) {
3589 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3590 sizeof(struct be_cmd_resp_hdr));
3591 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3592 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3593 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3594 }
3595 }
3596 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3597 extfat_cmd.dma);
3598 err:
3599 return level;
3600 }
3601 static int be_get_initial_config(struct be_adapter *adapter)
3602 {
3603 int status;
3604 u32 level;
3605
3606 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3607 &adapter->function_mode, &adapter->function_caps);
3608 if (status)
3609 return status;
3610
3611 if (adapter->function_mode & FLEX10_MODE)
3612 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3613 else
3614 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3615
3616 if (be_physfn(adapter))
3617 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3618 else
3619 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3620
3621 status = be_cmd_get_cntl_attributes(adapter);
3622 if (status)
3623 return status;
3624
3625 status = be_cmd_get_acpi_wol_cap(adapter);
3626 if (status) {
3627 /* in case of a failure to get WOL capabilities,
3628 * check the exclusion list to determine WOL capability */
3629 if (!be_is_wol_excluded(adapter))
3630 adapter->wol_cap |= BE_WOL_CAP;
3631 }
3632
3633 if (be_is_wol_supported(adapter))
3634 adapter->wol = true;
3635
3636 /* Must be a power of 2 or else MODULO will BUG_ON */
3637 adapter->be_get_temp_freq = 64;
3638
3639 level = be_get_fw_log_level(adapter);
3640 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3641
3642 return 0;
3643 }
3644
3645 static int be_dev_type_check(struct be_adapter *adapter)
3646 {
3647 struct pci_dev *pdev = adapter->pdev;
3648 u32 sli_intf = 0, if_type;
3649
3650 switch (pdev->device) {
3651 case BE_DEVICE_ID1:
3652 case OC_DEVICE_ID1:
3653 adapter->generation = BE_GEN2;
3654 break;
3655 case BE_DEVICE_ID2:
3656 case OC_DEVICE_ID2:
3657 adapter->generation = BE_GEN3;
3658 break;
3659 case OC_DEVICE_ID3:
3660 case OC_DEVICE_ID4:
3661 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3662 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663 SLI_INTF_IF_TYPE_SHIFT;
3664 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3665 SLI_INTF_IF_TYPE_SHIFT;
3666 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3667 !be_type_2_3(adapter)) {
3668 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3669 return -EINVAL;
3670 }
3671 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3672 SLI_INTF_FAMILY_SHIFT);
3673 adapter->generation = BE_GEN3;
3674 break;
3675 case OC_DEVICE_ID5:
3676 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3677 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3678 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3679 return -EINVAL;
3680 }
3681 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3682 SLI_INTF_FAMILY_SHIFT);
3683 adapter->generation = BE_GEN3;
3684 break;
3685 default:
3686 adapter->generation = 0;
3687 }
3688
3689 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3690 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3691 return 0;
3692 }
3693
3694 static int lancer_recover_func(struct be_adapter *adapter)
3695 {
3696 int status;
3697
3698 status = lancer_test_and_set_rdy_state(adapter);
3699 if (status)
3700 goto err;
3701
3702 if (netif_running(adapter->netdev))
3703 be_close(adapter->netdev);
3704
3705 be_clear(adapter);
3706
3707 adapter->hw_error = false;
3708 adapter->fw_timeout = false;
3709
3710 status = be_setup(adapter);
3711 if (status)
3712 goto err;
3713
3714 if (netif_running(adapter->netdev)) {
3715 status = be_open(adapter->netdev);
3716 if (status)
3717 goto err;
3718 }
3719
3720 dev_err(&adapter->pdev->dev,
3721 "Adapter SLIPORT recovery succeeded\n");
3722 return 0;
3723 err:
3724 dev_err(&adapter->pdev->dev,
3725 "Adapter SLIPORT recovery failed\n");
3726
3727 return status;
3728 }
3729
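/* Runs every second: detect errors and, on Lancer, detach the netdev and
 * attempt a SLIPORT function recovery when a HW error has been latched.
 */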
3730 static void be_func_recovery_task(struct work_struct *work)
3731 {
3732 struct be_adapter *adapter =
3733 container_of(work, struct be_adapter, func_recovery_work.work);
3734 int status;
3735
3736 be_detect_error(adapter);
3737
3738 if (adapter->hw_error && lancer_chip(adapter)) {
3739
3740 if (adapter->eeh_error)
3741 goto out;
3742
3743 rtnl_lock();
3744 netif_device_detach(adapter->netdev);
3745 rtnl_unlock();
3746
3747 status = lancer_recover_func(adapter);
3748
3749 if (!status)
3750 netif_device_attach(adapter->netdev);
3751 }
3752
3753 out:
3754 schedule_delayed_work(&adapter->func_recovery_work,
3755 msecs_to_jiffies(1000));
3756 }
3757
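/* Periodic (1s) housekeeping: issue stats/temperature FW cmds, replenish
 * starved RX queues and update EQ delays; when the interface is down only
 * pending MCC completions are reaped.
 */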
3758 static void be_worker(struct work_struct *work)
3759 {
3760 struct be_adapter *adapter =
3761 container_of(work, struct be_adapter, work.work);
3762 struct be_rx_obj *rxo;
3763 struct be_eq_obj *eqo;
3764 int i;
3765
3766 /* when interrupts are not yet enabled, just reap any pending
3767 * mcc completions */
3768 if (!netif_running(adapter->netdev)) {
3769 local_bh_disable();
3770 be_process_mcc(adapter);
3771 local_bh_enable();
3772 goto reschedule;
3773 }
3774
3775 if (!adapter->stats_cmd_sent) {
3776 if (lancer_chip(adapter))
3777 lancer_cmd_get_pport_stats(adapter,
3778 &adapter->stats_cmd);
3779 else
3780 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3781 }
3782
3783 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3784 be_cmd_get_die_temperature(adapter);
3785
3786 for_all_rx_queues(adapter, rxo, i) {
3787 if (rxo->rx_post_starved) {
3788 rxo->rx_post_starved = false;
3789 be_post_rx_frags(rxo, GFP_KERNEL);
3790 }
3791 }
3792
3793 for_all_evt_queues(adapter, eqo, i)
3794 be_eqd_update(adapter, eqo);
3795
3796 reschedule:
3797 adapter->work_counter++;
3798 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3799 }
3800
3801 static bool be_reset_required(struct be_adapter *adapter)
3802 {
3803 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3804 }
3805
3806 static int __devinit be_probe(struct pci_dev *pdev,
3807 const struct pci_device_id *pdev_id)
3808 {
3809 int status = 0;
3810 struct be_adapter *adapter;
3811 struct net_device *netdev;
3812 char port_name;
3813
3814 status = pci_enable_device(pdev);
3815 if (status)
3816 goto do_none;
3817
3818 status = pci_request_regions(pdev, DRV_NAME);
3819 if (status)
3820 goto disable_dev;
3821 pci_set_master(pdev);
3822
3823 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3824 if (netdev == NULL) {
3825 status = -ENOMEM;
3826 goto rel_reg;
3827 }
3828 adapter = netdev_priv(netdev);
3829 adapter->pdev = pdev;
3830 pci_set_drvdata(pdev, adapter);
3831
3832 status = be_dev_type_check(adapter);
3833 if (status)
3834 goto free_netdev;
3835
3836 adapter->netdev = netdev;
3837 SET_NETDEV_DEV(netdev, &pdev->dev);
3838
3839 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3840 if (!status) {
3841 netdev->features |= NETIF_F_HIGHDMA;
3842 } else {
3843 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3844 if (status) {
3845 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3846 goto free_netdev;
3847 }
3848 }
3849
3850 status = pci_enable_pcie_error_reporting(pdev);
3851 if (status)
3852 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3853
3854 status = be_ctrl_init(adapter);
3855 if (status)
3856 goto free_netdev;
3857
3858 /* sync up with fw's ready state */
3859 if (be_physfn(adapter)) {
3860 status = be_fw_wait_ready(adapter);
3861 if (status)
3862 goto ctrl_clean;
3863 }
3864
3865 /* tell fw we're ready to fire cmds */
3866 status = be_cmd_fw_init(adapter);
3867 if (status)
3868 goto ctrl_clean;
3869
3870 if (be_reset_required(adapter)) {
3871 status = be_cmd_reset_function(adapter);
3872 if (status)
3873 goto ctrl_clean;
3874 }
3875
3876 /* The INTR bit may be set in the card when probed by a kdump kernel
3877 * after a crash.
3878 */
3879 if (!lancer_chip(adapter))
3880 be_intr_set(adapter, false);
3881
3882 status = be_stats_init(adapter);
3883 if (status)
3884 goto ctrl_clean;
3885
3886 status = be_get_initial_config(adapter);
3887 if (status)
3888 goto stats_clean;
3889
3890 INIT_DELAYED_WORK(&adapter->work, be_worker);
3891 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3892 adapter->rx_fc = adapter->tx_fc = true;
3893
3894 status = be_setup(adapter);
3895 if (status)
3896 goto msix_disable;
3897
3898 be_netdev_init(netdev);
3899 status = register_netdev(netdev);
3900 if (status != 0)
3901 goto unsetup;
3902
3903 be_roce_dev_add(adapter);
3904
3905 schedule_delayed_work(&adapter->func_recovery_work,
3906 msecs_to_jiffies(1000));
3907
3908 be_cmd_query_port_name(adapter, &port_name);
3909
3910 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
3911 port_name);
3912
3913 return 0;
3914
3915 unsetup:
3916 be_clear(adapter);
3917 msix_disable:
3918 be_msix_disable(adapter);
3919 stats_clean:
3920 be_stats_cleanup(adapter);
3921 ctrl_clean:
3922 be_ctrl_cleanup(adapter);
3923 free_netdev:
3924 free_netdev(netdev);
3925 pci_set_drvdata(pdev, NULL);
3926 rel_reg:
3927 pci_release_regions(pdev);
3928 disable_dev:
3929 pci_disable_device(pdev);
3930 do_none:
3931 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3932 return status;
3933 }
3934
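/* PM suspend: arm wake-on-LAN if requested, stop the recovery worker,
 * detach/close the netdev, free HW resources and power the device down.
 */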
3935 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3936 {
3937 struct be_adapter *adapter = pci_get_drvdata(pdev);
3938 struct net_device *netdev = adapter->netdev;
3939
3940 if (adapter->wol)
3941 be_setup_wol(adapter, true);
3942
3943 cancel_delayed_work_sync(&adapter->func_recovery_work);
3944
3945 netif_device_detach(netdev);
3946 if (netif_running(netdev)) {
3947 rtnl_lock();
3948 be_close(netdev);
3949 rtnl_unlock();
3950 }
3951 be_clear(adapter);
3952
3953 pci_save_state(pdev);
3954 pci_disable_device(pdev);
3955 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3956 return 0;
3957 }
3958
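/* PM resume: re-enable the device, re-init FW commands, rebuild resources,
 * reattach the netdev and disarm wake-on-LAN if it was armed at suspend.
 */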
3959 static int be_resume(struct pci_dev *pdev)
3960 {
3961 int status = 0;
3962 struct be_adapter *adapter = pci_get_drvdata(pdev);
3963 struct net_device *netdev = adapter->netdev;
3964
3965 netif_device_detach(netdev);
3966
3967 status = pci_enable_device(pdev);
3968 if (status)
3969 return status;
3970
3971 pci_set_power_state(pdev, PCI_D0);
3972 pci_restore_state(pdev);
3973
3974 /* tell fw we're ready to fire cmds */
3975 status = be_cmd_fw_init(adapter);
3976 if (status)
3977 return status;
3978
3979 be_setup(adapter);
3980 if (netif_running(netdev)) {
3981 rtnl_lock();
3982 be_open(netdev);
3983 rtnl_unlock();
3984 }
3985
3986 schedule_delayed_work(&adapter->func_recovery_work,
3987 msecs_to_jiffies(1000));
3988 netif_device_attach(netdev);
3989
3990 if (adapter->wol)
3991 be_setup_wol(adapter, false);
3992
3993 return 0;
3994 }
3995
3996 /*
3997  * A function-level reset (FLR) stops the adapter from DMAing any further data.
3998  */
3999 static void be_shutdown(struct pci_dev *pdev)
4000 {
4001 struct be_adapter *adapter = pci_get_drvdata(pdev);
4002
4003 if (!adapter)
4004 return;
4005
4006 cancel_delayed_work_sync(&adapter->work);
4007 cancel_delayed_work_sync(&adapter->func_recovery_work);
4008
4009 netif_device_detach(adapter->netdev);
4010
4011 if (adapter->wol)
4012 be_setup_wol(adapter, true);
4013
4014 be_cmd_reset_function(adapter);
4015
4016 pci_disable_device(pdev);
4017 }
4018
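/* AER/EEH error_detected callback: flag the error, stop the recovery worker,
 * detach/close the netdev and free HW resources; ask for a slot reset unless
 * the channel failure is permanent.
 */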
4019 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4020 pci_channel_state_t state)
4021 {
4022 struct be_adapter *adapter = pci_get_drvdata(pdev);
4023 struct net_device *netdev = adapter->netdev;
4024
4025 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4026
4027 adapter->eeh_error = true;
4028
4029 cancel_delayed_work_sync(&adapter->func_recovery_work);
4030
4031 rtnl_lock();
4032 netif_device_detach(netdev);
4033 rtnl_unlock();
4034
4035 if (netif_running(netdev)) {
4036 rtnl_lock();
4037 be_close(netdev);
4038 rtnl_unlock();
4039 }
4040 be_clear(adapter);
4041
4042 if (state == pci_channel_io_perm_failure)
4043 return PCI_ERS_RESULT_DISCONNECT;
4044
4045 pci_disable_device(pdev);
4046
4047 /* The error could cause the FW to trigger a flash debug dump.
4048 * Resetting the card while flash dump is in progress
4049 * can cause it not to recover; wait for it to finish
4050 */
4051 ssleep(30);
4052 return PCI_ERS_RESULT_NEED_RESET;
4053 }
4054
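/* slot_reset callback: re-enable and restore the freshly reset function and
 * wait for firmware readiness before declaring the slot recovered.
 */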
4055 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4056 {
4057 struct be_adapter *adapter = pci_get_drvdata(pdev);
4058 int status;
4059
4060 dev_info(&adapter->pdev->dev, "EEH reset\n");
4061 be_clear_all_error(adapter);
4062
4063 status = pci_enable_device(pdev);
4064 if (status)
4065 return PCI_ERS_RESULT_DISCONNECT;
4066
4067 pci_set_master(pdev);
4068 pci_set_power_state(pdev, PCI_D0);
4069 pci_restore_state(pdev);
4070
4071 /* Check if card is ok and fw is ready */
4072 status = be_fw_wait_ready(adapter);
4073 if (status)
4074 return PCI_ERS_RESULT_DISCONNECT;
4075
4076 pci_cleanup_aer_uncorrect_error_status(pdev);
4077 return PCI_ERS_RESULT_RECOVERED;
4078 }
4079
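/* resume callback: re-init firmware, reset the function and rebuild all
 * resources; reopen the interface if it was running when the error hit.
 */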
4080 static void be_eeh_resume(struct pci_dev *pdev)
4081 {
4082 int status = 0;
4083 struct be_adapter *adapter = pci_get_drvdata(pdev);
4084 struct net_device *netdev = adapter->netdev;
4085
4086 dev_info(&adapter->pdev->dev, "EEH resume\n");
4087
4088 pci_save_state(pdev);
4089
4090 /* tell fw we're ready to fire cmds */
4091 status = be_cmd_fw_init(adapter);
4092 if (status)
4093 goto err;
4094
4095 status = be_cmd_reset_function(adapter);
4096 if (status)
4097 goto err;
4098
4099 status = be_setup(adapter);
4100 if (status)
4101 goto err;
4102
4103 if (netif_running(netdev)) {
4104 status = be_open(netdev);
4105 if (status)
4106 goto err;
4107 }
4108
4109 schedule_delayed_work(&adapter->func_recovery_work,
4110 msecs_to_jiffies(1000));
4111 netif_device_attach(netdev);
4112 return;
4113 err:
4114 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4115 }
4116
4117 static struct pci_error_handlers be_eeh_handlers = {
4118 .error_detected = be_eeh_err_detected,
4119 .slot_reset = be_eeh_reset,
4120 .resume = be_eeh_resume,
4121 };
4122
4123 static struct pci_driver be_driver = {
4124 .name = DRV_NAME,
4125 .id_table = be_dev_ids,
4126 .probe = be_probe,
4127 .remove = be_remove,
4128 .suspend = be_suspend,
4129 .resume = be_resume,
4130 .shutdown = be_shutdown,
4131 .err_handler = &be_eeh_handlers
4132 };
4133
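/* Module init: sanity-check the rx_frag_size parameter and register the
 * PCI driver.
 */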
4134 static int __init be_init_module(void)
4135 {
4136 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4137 rx_frag_size != 2048) {
4138 printk(KERN_WARNING DRV_NAME
4139 " : Module param rx_frag_size must be 2048/4096/8192."
4140 " Using 2048\n");
4141 rx_frag_size = 2048;
4142 }
4143
4144 return pci_register_driver(&be_driver);
4145 }
4146 module_init(be_init_module);
4147
4148 static void __exit be_exit_module(void)
4149 {
4150 pci_unregister_driver(&be_driver);
4151 }
4152 module_exit(be_exit_module);