be2net: Fix INTx processing for Lancer
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 { 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 "LPCMEMHOST",
87 "MGMT_MAC",
88 "PCS0ONLINE",
89 "MPU_IRAM",
90 "PCS1ONLINE",
91 "PCTL0",
92 "PCTL1",
93 "PMEM",
94 "RR",
95 "TXPB",
96 "RXPP",
97 "XAUI",
98 "TXP",
99 "ARM",
100 "IPC",
101 "HOST2",
102 "HOST3",
103 "HOST4",
104 "HOST5",
105 "HOST6",
106 "HOST7",
107 "HOST8",
108 "HOST9",
109 "NETC",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown"
118 };
119
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125 }
126
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
133 }
134
135 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137 {
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150 }
151
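/* Enable or disable interrupt delivery to the host by toggling the
 * HOSTINTR bit in the membar control register in PCI config space.
 */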
152 static void be_intr_set(struct be_adapter *adapter, bool enable)
153 {
154 u32 reg, enabled;
155
156 if (adapter->eeh_err)
157 return;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
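/* Doorbell helpers: tell the adapter how many RX buffers or TX WRBs were
 * posted. The wmb() makes the descriptor writes visible to the device
 * before the doorbell register is written.
 */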
174 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
175 {
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
179
180 wmb();
181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
182 }
183
184 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
185 {
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
189
190 wmb();
191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
192 }
193
194 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
195 bool arm, bool clear_int, u16 num_popped)
196 {
197 u32 val = 0;
198 val |= qid & DB_EQ_RING_ID_MASK;
199 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
200 DB_EQ_RING_ID_EXT_MASK_SHIFT);
201
202 if (adapter->eeh_err)
203 return;
204
205 if (arm)
206 val |= 1 << DB_EQ_REARM_SHIFT;
207 if (clear_int)
208 val |= 1 << DB_EQ_CLR_SHIFT;
209 val |= 1 << DB_EQ_EVNT_SHIFT;
210 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
211 iowrite32(val, adapter->db + DB_EQ_OFFSET);
212 }
213
214 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
215 {
216 u32 val = 0;
217 val |= qid & DB_CQ_RING_ID_MASK;
218 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
219 DB_CQ_RING_ID_EXT_MASK_SHIFT);
220
221 if (adapter->eeh_err)
222 return;
223
224 if (arm)
225 val |= 1 << DB_CQ_REARM_SHIFT;
226 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
227 iowrite32(val, adapter->db + DB_CQ_OFFSET);
228 }
229
230 static int be_mac_addr_set(struct net_device *netdev, void *p)
231 {
232 struct be_adapter *adapter = netdev_priv(netdev);
233 struct sockaddr *addr = p;
234 int status = 0;
235 u8 current_mac[ETH_ALEN];
236 u32 pmac_id = adapter->pmac_id;
237
238 if (!is_valid_ether_addr(addr->sa_data))
239 return -EADDRNOTAVAIL;
240
241 status = be_cmd_mac_addr_query(adapter, current_mac,
242 MAC_ADDRESS_TYPE_NETWORK, false,
243 adapter->if_handle, 0);
244 if (status)
245 goto err;
246
247 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
248 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
249 adapter->if_handle, &adapter->pmac_id, 0);
250 if (status)
251 goto err;
252
253 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
254 }
255 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
256 return 0;
257 err:
258 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
259 return status;
260 }
261
262 static void populate_be2_stats(struct be_adapter *adapter)
263 {
264 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
267 struct be_port_rxf_stats_v0 *port_stats =
268 &rxf_stats->port[adapter->port_num];
269 struct be_drv_stats *drvs = &adapter->drv_stats;
270
271 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
272 drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 drvs->rx_control_frames = port_stats->rx_control_frames;
275 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small;
289 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
290 drvs->rx_alignment_symbol_errors =
291 port_stats->rx_alignment_symbol_errors;
292
293 drvs->tx_pauseframes = port_stats->tx_pauseframes;
294 drvs->tx_controlframes = port_stats->tx_controlframes;
295
296 if (adapter->port_num)
297 drvs->jabber_events = rxf_stats->port1_jabber_events;
298 else
299 drvs->jabber_events = rxf_stats->port0_jabber_events;
300 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
308 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309 }
310
311 static void populate_be3_stats(struct be_adapter *adapter)
312 {
313 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
316 struct be_port_rxf_stats_v1 *port_stats =
317 &rxf_stats->port[adapter->port_num];
318 struct be_drv_stats *drvs = &adapter->drv_stats;
319
320 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
321 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
323 drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 drvs->rx_control_frames = port_stats->rx_control_frames;
326 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 drvs->rx_dropped_header_too_small =
337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop;
340 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors;
343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
344 drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
360
361 struct be_drv_stats *drvs = &adapter->drv_stats;
362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
384 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
385 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
386 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
387 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
388 drvs->jabber_events = pport_stats->rx_jabbers;
389 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
390 drvs->forwarded_packets = pport_stats->num_forwards_lo;
391 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
392 drvs->rx_drops_too_many_frags =
393 pport_stats->rx_drops_too_many_frags_lo;
394 }
395
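/* Fold a 16-bit HW counter (which wraps at 65535) into a 32-bit software
 * accumulator: a new value smaller than the accumulator's low half means
 * the counter wrapped, so carry 65536 into the high half.
 */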
396 static void accumulate_16bit_val(u32 *acc, u16 val)
397 {
398 #define lo(x) ((x) & 0xFFFF)
399 #define hi(x) ((x) & 0xFFFF0000)
400 bool wrapped = val < lo(*acc);
401 u32 newacc = hi(*acc) + val;
402
403 if (wrapped)
404 newacc += 65536;
405 ACCESS_ONCE(*acc) = newacc;
406 }
407
408 void be_parse_stats(struct be_adapter *adapter)
409 {
410 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
411 struct be_rx_obj *rxo;
412 int i;
413
414 if (adapter->generation == BE_GEN3) {
415 if (lancer_chip(adapter))
416 populate_lancer_stats(adapter);
417 else
418 populate_be3_stats(adapter);
419 } else {
420 populate_be2_stats(adapter);
421 }
422
423 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
424 for_all_rx_queues(adapter, rxo, i) {
425 /* below erx HW counter can actually wrap around after
426 * 65535. Driver accumulates a 32-bit value
427 */
428 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
429 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
430 }
431 }
432
433 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
434 struct rtnl_link_stats64 *stats)
435 {
436 struct be_adapter *adapter = netdev_priv(netdev);
437 struct be_drv_stats *drvs = &adapter->drv_stats;
438 struct be_rx_obj *rxo;
439 struct be_tx_obj *txo;
440 u64 pkts, bytes;
441 unsigned int start;
442 int i;
443
444 for_all_rx_queues(adapter, rxo, i) {
445 const struct be_rx_stats *rx_stats = rx_stats(rxo);
446 do {
447 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
448 pkts = rx_stats(rxo)->rx_pkts;
449 bytes = rx_stats(rxo)->rx_bytes;
450 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
451 stats->rx_packets += pkts;
452 stats->rx_bytes += bytes;
453 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
454 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
455 rx_stats(rxo)->rx_drops_no_frags;
456 }
457
458 for_all_tx_queues(adapter, txo, i) {
459 const struct be_tx_stats *tx_stats = tx_stats(txo);
460 do {
461 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
462 pkts = tx_stats(txo)->tx_pkts;
463 bytes = tx_stats(txo)->tx_bytes;
464 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
465 stats->tx_packets += pkts;
466 stats->tx_bytes += bytes;
467 }
468
469 /* bad pkts received */
470 stats->rx_errors = drvs->rx_crc_errors +
471 drvs->rx_alignment_symbol_errors +
472 drvs->rx_in_range_errors +
473 drvs->rx_out_range_errors +
474 drvs->rx_frame_too_long +
475 drvs->rx_dropped_too_small +
476 drvs->rx_dropped_too_short +
477 drvs->rx_dropped_header_too_small +
478 drvs->rx_dropped_tcp_length +
479 drvs->rx_dropped_runt;
480
481 /* detailed rx errors */
482 stats->rx_length_errors = drvs->rx_in_range_errors +
483 drvs->rx_out_range_errors +
484 drvs->rx_frame_too_long;
485
486 stats->rx_crc_errors = drvs->rx_crc_errors;
487
488 /* frame alignment errors */
489 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
490
491 /* receiver fifo overrun */
492 /* drops_no_pbuf is not per i/f, it's per BE card */
493 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
494 drvs->rx_input_fifo_overflow_drop +
495 drvs->rx_drops_no_pbuf;
496 return stats;
497 }
498
499 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
500 {
501 struct net_device *netdev = adapter->netdev;
502
503 /* when link status changes, link speed must be re-queried from card */
504 adapter->link_speed = -1;
505 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
506 netif_carrier_on(netdev);
507 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
508 } else {
509 netif_carrier_off(netdev);
510 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
511 }
512 }
513
514 static void be_tx_stats_update(struct be_tx_obj *txo,
515 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
516 {
517 struct be_tx_stats *stats = tx_stats(txo);
518
519 u64_stats_update_begin(&stats->sync);
520 stats->tx_reqs++;
521 stats->tx_wrbs += wrb_cnt;
522 stats->tx_bytes += copied;
523 stats->tx_pkts += (gso_segs ? gso_segs : 1);
524 if (stopped)
525 stats->tx_stops++;
526 u64_stats_update_end(&stats->sync);
527 }
528
529 /* Determine number of WRB entries needed to xmit data in an skb */
530 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
531 bool *dummy)
532 {
533 int cnt = (skb->len > skb->data_len);
534
535 cnt += skb_shinfo(skb)->nr_frags;
536
537 /* to account for hdr wrb */
538 cnt++;
539 if (lancer_chip(adapter) || !(cnt & 1)) {
540 *dummy = false;
541 } else {
542 /* add a dummy to make it an even num */
543 cnt++;
544 *dummy = true;
545 }
546 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
547 return cnt;
548 }
549
550 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
551 {
552 wrb->frag_pa_hi = upper_32_bits(addr);
553 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
554 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
555 }
556
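/* If the 802.1p priority in the skb's VLAN tag is not enabled in the
 * adapter's priority bitmap, rewrite the tag with the recommended priority.
 */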
557 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 struct sk_buff *skb)
559 {
560 u8 vlan_prio;
561 u16 vlan_tag;
562
563 vlan_tag = vlan_tx_tag_get(skb);
564 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 /* If vlan priority provided by OS is NOT in available bmap */
566 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 adapter->recommended_prio;
569
570 return vlan_tag;
571 }
572
573 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
574 struct sk_buff *skb, u32 wrb_cnt, u32 len)
575 {
576 u16 vlan_tag;
577
578 memset(hdr, 0, sizeof(*hdr));
579
580 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
581
582 if (skb_is_gso(skb)) {
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
584 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
585 hdr, skb_shinfo(skb)->gso_size);
586 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
588 if (lancer_chip(adapter) && adapter->sli_family ==
589 LANCER_A0_SLI_FAMILY) {
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
591 if (is_tcp_pkt(skb))
592 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
593 tcpcs, hdr, 1);
594 else if (is_udp_pkt(skb))
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
596 udpcs, hdr, 1);
597 }
598 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599 if (is_tcp_pkt(skb))
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
601 else if (is_udp_pkt(skb))
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
603 }
604
605 if (vlan_tx_tag_present(skb)) {
606 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
607 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
608 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
609 }
610
611 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
613 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
615 }
616
617 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
618 bool unmap_single)
619 {
620 dma_addr_t dma;
621
622 be_dws_le_to_cpu(wrb, sizeof(*wrb));
623
624 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
625 if (wrb->frag_len) {
626 if (unmap_single)
627 dma_unmap_single(dev, dma, wrb->frag_len,
628 DMA_TO_DEVICE);
629 else
630 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
631 }
632 }
633
634 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
635 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
636 {
637 dma_addr_t busaddr;
638 int i, copied = 0;
639 struct device *dev = &adapter->pdev->dev;
640 struct sk_buff *first_skb = skb;
641 struct be_eth_wrb *wrb;
642 struct be_eth_hdr_wrb *hdr;
643 bool map_single = false;
644 u16 map_head;
645
646 hdr = queue_head_node(txq);
647 queue_head_inc(txq);
648 map_head = txq->head;
649
650 if (skb->len > skb->data_len) {
651 int len = skb_headlen(skb);
652 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
653 if (dma_mapping_error(dev, busaddr))
654 goto dma_err;
655 map_single = true;
656 wrb = queue_head_node(txq);
657 wrb_fill(wrb, busaddr, len);
658 be_dws_cpu_to_le(wrb, sizeof(*wrb));
659 queue_head_inc(txq);
660 copied += len;
661 }
662
663 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
664 const struct skb_frag_struct *frag =
665 &skb_shinfo(skb)->frags[i];
666 busaddr = skb_frag_dma_map(dev, frag, 0,
667 skb_frag_size(frag), DMA_TO_DEVICE);
668 if (dma_mapping_error(dev, busaddr))
669 goto dma_err;
670 wrb = queue_head_node(txq);
671 wrb_fill(wrb, busaddr, skb_frag_size(frag));
672 be_dws_cpu_to_le(wrb, sizeof(*wrb));
673 queue_head_inc(txq);
674 copied += skb_frag_size(frag);
675 }
676
677 if (dummy_wrb) {
678 wrb = queue_head_node(txq);
679 wrb_fill(wrb, 0, 0);
680 be_dws_cpu_to_le(wrb, sizeof(*wrb));
681 queue_head_inc(txq);
682 }
683
684 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
685 be_dws_cpu_to_le(hdr, sizeof(*hdr));
686
687 return copied;
688 dma_err:
689 txq->head = map_head;
690 while (copied) {
691 wrb = queue_head_node(txq);
692 unmap_tx_frag(dev, wrb, map_single);
693 map_single = false;
694 copied -= wrb->frag_len;
695 queue_head_inc(txq);
696 }
697 return 0;
698 }
699
700 static netdev_tx_t be_xmit(struct sk_buff *skb,
701 struct net_device *netdev)
702 {
703 struct be_adapter *adapter = netdev_priv(netdev);
704 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
705 struct be_queue_info *txq = &txo->q;
706 u32 wrb_cnt = 0, copied = 0;
707 u32 start = txq->head;
708 bool dummy_wrb, stopped = false;
709
710 /* For vlan tagged pkts, BE
711 * 1) calculates checksum even when CSO is not requested
712 * 2) calculates checksum wrongly for padded pkt less than
713 * 60 bytes long.
714 * As a workaround disable TX vlan offloading in such cases.
715 */
716 if (unlikely(vlan_tx_tag_present(skb) &&
717 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
718 skb = skb_share_check(skb, GFP_ATOMIC);
719 if (unlikely(!skb))
720 goto tx_drop;
721
722 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
723 if (unlikely(!skb))
724 goto tx_drop;
725
726 skb->vlan_tci = 0;
727 }
728
729 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
730
731 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
732 if (copied) {
733 /* record the sent skb in the sent_skb table */
734 BUG_ON(txo->sent_skb_list[start]);
735 txo->sent_skb_list[start] = skb;
736
737 /* Ensure txq has space for the next skb; else stop the queue
738 * *BEFORE* ringing the tx doorbell, so that we serialize the
739 * tx compls of the current transmit, which will wake up the queue
740 */
741 atomic_add(wrb_cnt, &txq->used);
742 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
743 txq->len) {
744 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
745 stopped = true;
746 }
747
748 be_txq_notify(adapter, txq->id, wrb_cnt);
749
750 be_tx_stats_update(txo, wrb_cnt, copied,
751 skb_shinfo(skb)->gso_segs, stopped);
752 } else {
753 txq->head = start;
754 dev_kfree_skb_any(skb);
755 }
756 tx_drop:
757 return NETDEV_TX_OK;
758 }
759
760 static int be_change_mtu(struct net_device *netdev, int new_mtu)
761 {
762 struct be_adapter *adapter = netdev_priv(netdev);
763 if (new_mtu < BE_MIN_MTU ||
764 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 (ETH_HLEN + ETH_FCS_LEN))) {
766 dev_info(&adapter->pdev->dev,
767 "MTU must be between %d and %d bytes\n",
768 BE_MIN_MTU,
769 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
770 return -EINVAL;
771 }
772 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 netdev->mtu, new_mtu);
774 netdev->mtu = new_mtu;
775 return 0;
776 }
777
778 /*
779 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
780 * If the user configures more, place BE in vlan promiscuous mode.
781 */
782 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
783 {
784 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
785 u16 vtag[BE_NUM_VLANS_SUPPORTED];
786 u16 ntags = 0, i;
787 int status = 0;
788
789 if (vf) {
790 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
791 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
792 1, 1, 0);
793 }
794
795 /* No need to further configure vids if in promiscuous mode */
796 if (adapter->promiscuous)
797 return 0;
798
799 if (adapter->vlans_added <= adapter->max_vlans) {
800 /* Construct VLAN Table to give to HW */
801 for (i = 0; i < VLAN_N_VID; i++) {
802 if (adapter->vlan_tag[i]) {
803 vtag[ntags] = cpu_to_le16(i);
804 ntags++;
805 }
806 }
807 status = be_cmd_vlan_config(adapter, adapter->if_handle,
808 vtag, ntags, 1, 0);
809 } else {
810 status = be_cmd_vlan_config(adapter, adapter->if_handle,
811 NULL, 0, 1, 1);
812 }
813
814 return status;
815 }
816
817 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
818 {
819 struct be_adapter *adapter = netdev_priv(netdev);
820
821 adapter->vlans_added++;
822 if (!be_physfn(adapter))
823 return 0;
824
825 adapter->vlan_tag[vid] = 1;
826 if (adapter->vlans_added <= (adapter->max_vlans + 1))
827 be_vid_config(adapter, false, 0);
828
829 return 0;
830 }
831
832 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
833 {
834 struct be_adapter *adapter = netdev_priv(netdev);
835
836 adapter->vlans_added--;
837
838 if (!be_physfn(adapter))
839 return 0;
840
841 adapter->vlan_tag[vid] = 0;
842 if (adapter->vlans_added <= adapter->max_vlans)
843 be_vid_config(adapter, false, 0);
844
845 return 0;
846 }
847
848 static void be_set_rx_mode(struct net_device *netdev)
849 {
850 struct be_adapter *adapter = netdev_priv(netdev);
851
852 if (netdev->flags & IFF_PROMISC) {
853 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
854 adapter->promiscuous = true;
855 goto done;
856 }
857
858 /* BE was previously in promiscuous mode; disable it */
859 if (adapter->promiscuous) {
860 adapter->promiscuous = false;
861 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
862
863 if (adapter->vlans_added)
864 be_vid_config(adapter, false, 0);
865 }
866
867 /* Enable multicast promisc if num configured exceeds what we support */
868 if (netdev->flags & IFF_ALLMULTI ||
869 netdev_mc_count(netdev) > BE_MAX_MC) {
870 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
871 goto done;
872 }
873
874 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
875 done:
876 return;
877 }
878
879 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
880 {
881 struct be_adapter *adapter = netdev_priv(netdev);
882 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
883 int status;
884
885 if (!sriov_enabled(adapter))
886 return -EPERM;
887
888 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
889 return -EINVAL;
890
891 if (lancer_chip(adapter)) {
892 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
893 } else {
894 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
895 vf_cfg->pmac_id, vf + 1);
896
897 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
898 &vf_cfg->pmac_id, vf + 1);
899 }
900
901 if (status)
902 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
903 mac, vf);
904 else
905 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
906
907 return status;
908 }
909
910 static int be_get_vf_config(struct net_device *netdev, int vf,
911 struct ifla_vf_info *vi)
912 {
913 struct be_adapter *adapter = netdev_priv(netdev);
914 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
915
916 if (!sriov_enabled(adapter))
917 return -EPERM;
918
919 if (vf >= adapter->num_vfs)
920 return -EINVAL;
921
922 vi->vf = vf;
923 vi->tx_rate = vf_cfg->tx_rate;
924 vi->vlan = vf_cfg->vlan_tag;
925 vi->qos = 0;
926 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
927
928 return 0;
929 }
930
931 static int be_set_vf_vlan(struct net_device *netdev,
932 int vf, u16 vlan, u8 qos)
933 {
934 struct be_adapter *adapter = netdev_priv(netdev);
935 int status = 0;
936
937 if (!sriov_enabled(adapter))
938 return -EPERM;
939
940 if (vf >= adapter->num_vfs || vlan > 4095)
941 return -EINVAL;
942
943 if (vlan) {
944 adapter->vf_cfg[vf].vlan_tag = vlan;
945 adapter->vlans_added++;
946 } else {
947 adapter->vf_cfg[vf].vlan_tag = 0;
948 adapter->vlans_added--;
949 }
950
951 status = be_vid_config(adapter, true, vf);
952
953 if (status)
954 dev_info(&adapter->pdev->dev,
955 "VLAN %d config on VF %d failed\n", vlan, vf);
956 return status;
957 }
958
959 static int be_set_vf_tx_rate(struct net_device *netdev,
960 int vf, int rate)
961 {
962 struct be_adapter *adapter = netdev_priv(netdev);
963 int status = 0;
964
965 if (!sriov_enabled(adapter))
966 return -EPERM;
967
968 if (vf >= adapter->num_vfs || rate < 0)
969 return -EINVAL;
970
971 if (rate > 10000)
972 rate = 10000;
973
974 adapter->vf_cfg[vf].tx_rate = rate;
975 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
976
977 if (status)
978 dev_info(&adapter->pdev->dev,
979 "tx rate %d on VF %d failed\n", rate, vf);
980 return status;
981 }
982
983 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
984 {
985 struct be_eq_obj *rx_eq = &rxo->rx_eq;
986 struct be_rx_stats *stats = rx_stats(rxo);
987 ulong now = jiffies;
988 ulong delta = now - stats->rx_jiffies;
989 u64 pkts;
990 unsigned int start, eqd;
991
992 if (!rx_eq->enable_aic)
993 return;
994
995 /* Wrapped around */
996 if (time_before(now, stats->rx_jiffies)) {
997 stats->rx_jiffies = now;
998 return;
999 }
1000
1001 /* Update once a second */
1002 if (delta < HZ)
1003 return;
1004
1005 do {
1006 start = u64_stats_fetch_begin_bh(&stats->sync);
1007 pkts = stats->rx_pkts;
1008 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1009
1010 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1011 stats->rx_pkts_prev = pkts;
1012 stats->rx_jiffies = now;
1013 eqd = stats->rx_pps / 110000;
1014 eqd = eqd << 3;
1015 if (eqd > rx_eq->max_eqd)
1016 eqd = rx_eq->max_eqd;
1017 if (eqd < rx_eq->min_eqd)
1018 eqd = rx_eq->min_eqd;
1019 if (eqd < 10)
1020 eqd = 0;
1021 if (eqd != rx_eq->cur_eqd) {
1022 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
1023 rx_eq->cur_eqd = eqd;
1024 }
1025 }
1026
1027 static void be_rx_stats_update(struct be_rx_obj *rxo,
1028 struct be_rx_compl_info *rxcp)
1029 {
1030 struct be_rx_stats *stats = rx_stats(rxo);
1031
1032 u64_stats_update_begin(&stats->sync);
1033 stats->rx_compl++;
1034 stats->rx_bytes += rxcp->pkt_size;
1035 stats->rx_pkts++;
1036 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1037 stats->rx_mcast_pkts++;
1038 if (rxcp->err)
1039 stats->rx_compl_err++;
1040 u64_stats_update_end(&stats->sync);
1041 }
1042
1043 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1044 {
1045 /* L4 checksum is not reliable for non TCP/UDP packets.
1046 * Also ignore ipcksm for ipv6 pkts */
1047 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1048 (rxcp->ip_csum || rxcp->ipv6);
1049 }
1050
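/* Look up the page_info for a posted RX fragment and unmap the backing
 * page once the last fragment carved out of it is being consumed.
 */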
1051 static struct be_rx_page_info *
1052 get_rx_page_info(struct be_adapter *adapter,
1053 struct be_rx_obj *rxo,
1054 u16 frag_idx)
1055 {
1056 struct be_rx_page_info *rx_page_info;
1057 struct be_queue_info *rxq = &rxo->q;
1058
1059 rx_page_info = &rxo->page_info_tbl[frag_idx];
1060 BUG_ON(!rx_page_info->page);
1061
1062 if (rx_page_info->last_page_user) {
1063 dma_unmap_page(&adapter->pdev->dev,
1064 dma_unmap_addr(rx_page_info, bus),
1065 adapter->big_page_size, DMA_FROM_DEVICE);
1066 rx_page_info->last_page_user = false;
1067 }
1068
1069 atomic_dec(&rxq->used);
1070 return rx_page_info;
1071 }
1072
1073 /* Throw away the data in the Rx completion */
1074 static void be_rx_compl_discard(struct be_adapter *adapter,
1075 struct be_rx_obj *rxo,
1076 struct be_rx_compl_info *rxcp)
1077 {
1078 struct be_queue_info *rxq = &rxo->q;
1079 struct be_rx_page_info *page_info;
1080 u16 i, num_rcvd = rxcp->num_rcvd;
1081
1082 for (i = 0; i < num_rcvd; i++) {
1083 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1084 put_page(page_info->page);
1085 memset(page_info, 0, sizeof(*page_info));
1086 index_inc(&rxcp->rxq_idx, rxq->len);
1087 }
1088 }
1089
1090 /*
1091 * skb_fill_rx_data forms a complete skb for an ether frame
1092 * indicated by rxcp.
1093 */
1094 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1095 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1096 {
1097 struct be_queue_info *rxq = &rxo->q;
1098 struct be_rx_page_info *page_info;
1099 u16 i, j;
1100 u16 hdr_len, curr_frag_len, remaining;
1101 u8 *start;
1102
1103 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1104 start = page_address(page_info->page) + page_info->page_offset;
1105 prefetch(start);
1106
1107 /* Copy data in the first descriptor of this completion */
1108 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1109
1110 /* Copy the header portion into skb_data */
1111 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1112 memcpy(skb->data, start, hdr_len);
1113 skb->len = curr_frag_len;
1114 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1115 /* Complete packet has now been moved to data */
1116 put_page(page_info->page);
1117 skb->data_len = 0;
1118 skb->tail += curr_frag_len;
1119 } else {
1120 skb_shinfo(skb)->nr_frags = 1;
1121 skb_frag_set_page(skb, 0, page_info->page);
1122 skb_shinfo(skb)->frags[0].page_offset =
1123 page_info->page_offset + hdr_len;
1124 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1125 skb->data_len = curr_frag_len - hdr_len;
1126 skb->truesize += rx_frag_size;
1127 skb->tail += hdr_len;
1128 }
1129 page_info->page = NULL;
1130
1131 if (rxcp->pkt_size <= rx_frag_size) {
1132 BUG_ON(rxcp->num_rcvd != 1);
1133 return;
1134 }
1135
1136 /* More frags present for this completion */
1137 index_inc(&rxcp->rxq_idx, rxq->len);
1138 remaining = rxcp->pkt_size - curr_frag_len;
1139 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1140 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1141 curr_frag_len = min(remaining, rx_frag_size);
1142
1143 /* Coalesce all frags from the same physical page in one slot */
1144 if (page_info->page_offset == 0) {
1145 /* Fresh page */
1146 j++;
1147 skb_frag_set_page(skb, j, page_info->page);
1148 skb_shinfo(skb)->frags[j].page_offset =
1149 page_info->page_offset;
1150 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1151 skb_shinfo(skb)->nr_frags++;
1152 } else {
1153 put_page(page_info->page);
1154 }
1155
1156 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1157 skb->len += curr_frag_len;
1158 skb->data_len += curr_frag_len;
1159 skb->truesize += rx_frag_size;
1160 remaining -= curr_frag_len;
1161 index_inc(&rxcp->rxq_idx, rxq->len);
1162 page_info->page = NULL;
1163 }
1164 BUG_ON(j > MAX_SKB_FRAGS);
1165 }
1166
1167 /* Process the RX completion indicated by rxcp when GRO is disabled */
1168 static void be_rx_compl_process(struct be_adapter *adapter,
1169 struct be_rx_obj *rxo,
1170 struct be_rx_compl_info *rxcp)
1171 {
1172 struct net_device *netdev = adapter->netdev;
1173 struct sk_buff *skb;
1174
1175 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1176 if (unlikely(!skb)) {
1177 rx_stats(rxo)->rx_drops_no_skbs++;
1178 be_rx_compl_discard(adapter, rxo, rxcp);
1179 return;
1180 }
1181
1182 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1183
1184 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1185 skb->ip_summed = CHECKSUM_UNNECESSARY;
1186 else
1187 skb_checksum_none_assert(skb);
1188
1189 skb->protocol = eth_type_trans(skb, netdev);
1190 if (adapter->netdev->features & NETIF_F_RXHASH)
1191 skb->rxhash = rxcp->rss_hash;
1192
1193
1194 if (rxcp->vlanf)
1195 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1196
1197 netif_receive_skb(skb);
1198 }
1199
1200 /* Process the RX completion indicated by rxcp when GRO is enabled */
1201 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1202 struct be_rx_obj *rxo,
1203 struct be_rx_compl_info *rxcp)
1204 {
1205 struct be_rx_page_info *page_info;
1206 struct sk_buff *skb = NULL;
1207 struct be_queue_info *rxq = &rxo->q;
1208 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1209 u16 remaining, curr_frag_len;
1210 u16 i, j;
1211
1212 skb = napi_get_frags(&eq_obj->napi);
1213 if (!skb) {
1214 be_rx_compl_discard(adapter, rxo, rxcp);
1215 return;
1216 }
1217
1218 remaining = rxcp->pkt_size;
1219 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1220 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1221
1222 curr_frag_len = min(remaining, rx_frag_size);
1223
1224 /* Coalesce all frags from the same physical page in one slot */
1225 if (i == 0 || page_info->page_offset == 0) {
1226 /* First frag or Fresh page */
1227 j++;
1228 skb_frag_set_page(skb, j, page_info->page);
1229 skb_shinfo(skb)->frags[j].page_offset =
1230 page_info->page_offset;
1231 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1232 } else {
1233 put_page(page_info->page);
1234 }
1235 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1236 skb->truesize += rx_frag_size;
1237 remaining -= curr_frag_len;
1238 index_inc(&rxcp->rxq_idx, rxq->len);
1239 memset(page_info, 0, sizeof(*page_info));
1240 }
1241 BUG_ON(j > MAX_SKB_FRAGS);
1242
1243 skb_shinfo(skb)->nr_frags = j + 1;
1244 skb->len = rxcp->pkt_size;
1245 skb->data_len = rxcp->pkt_size;
1246 skb->ip_summed = CHECKSUM_UNNECESSARY;
1247 if (adapter->netdev->features & NETIF_F_RXHASH)
1248 skb->rxhash = rxcp->rss_hash;
1249
1250 if (rxcp->vlanf)
1251 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1252
1253 napi_gro_frags(&eq_obj->napi);
1254 }
1255
1256 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1257 struct be_eth_rx_compl *compl,
1258 struct be_rx_compl_info *rxcp)
1259 {
1260 rxcp->pkt_size =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1262 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1263 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1264 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1265 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1266 rxcp->ip_csum =
1267 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1268 rxcp->l4_csum =
1269 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1270 rxcp->ipv6 =
1271 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1272 rxcp->rxq_idx =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1274 rxcp->num_rcvd =
1275 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1276 rxcp->pkt_type =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1278 rxcp->rss_hash =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1280 if (rxcp->vlanf) {
1281 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1282 compl);
1283 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1284 compl);
1285 }
1286 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1287 }
1288
1289 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1290 struct be_eth_rx_compl *compl,
1291 struct be_rx_compl_info *rxcp)
1292 {
1293 rxcp->pkt_size =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1295 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1296 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1297 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1298 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1299 rxcp->ip_csum =
1300 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1301 rxcp->l4_csum =
1302 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1303 rxcp->ipv6 =
1304 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1305 rxcp->rxq_idx =
1306 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1307 rxcp->num_rcvd =
1308 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1309 rxcp->pkt_type =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1311 rxcp->rss_hash =
1312 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1313 if (rxcp->vlanf) {
1314 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1315 compl);
1316 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1317 compl);
1318 }
1319 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1320 }
1321
1322 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1323 {
1324 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1325 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1326 struct be_adapter *adapter = rxo->adapter;
1327
1328 /* For checking the valid bit it is Ok to use either definition as the
1329 * valid bit is at the same position in both v0 and v1 Rx compl */
1330 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1331 return NULL;
1332
1333 rmb();
1334 be_dws_le_to_cpu(compl, sizeof(*compl));
1335
1336 if (adapter->be3_native)
1337 be_parse_rx_compl_v1(adapter, compl, rxcp);
1338 else
1339 be_parse_rx_compl_v0(adapter, compl, rxcp);
1340
1341 if (rxcp->vlanf) {
1342 /* vlanf could be wrongly set in some cards.
1343 * ignore if vtm is not set */
1344 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1345 rxcp->vlanf = 0;
1346
1347 if (!lancer_chip(adapter))
1348 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1349
1350 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1351 !adapter->vlan_tag[rxcp->vlan_tag])
1352 rxcp->vlanf = 0;
1353 }
1354
1355 /* As the compl has been parsed, reset it; we won't touch it again */
1356 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1357
1358 queue_tail_inc(&rxo->cq);
1359 return rxcp;
1360 }
1361
1362 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1363 {
1364 u32 order = get_order(size);
1365
1366 if (order > 0)
1367 gfp |= __GFP_COMP;
1368 return alloc_pages(gfp, order);
1369 }
1370
1371 /*
1372 * Allocate a page, split it to fragments of size rx_frag_size and post as
1373 * receive buffers to BE
1374 */
1375 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1376 {
1377 struct be_adapter *adapter = rxo->adapter;
1378 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1379 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1380 struct be_queue_info *rxq = &rxo->q;
1381 struct page *pagep = NULL;
1382 struct be_eth_rx_d *rxd;
1383 u64 page_dmaaddr = 0, frag_dmaaddr;
1384 u32 posted, page_offset = 0;
1385
1386 page_info = &rxo->page_info_tbl[rxq->head];
1387 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1388 if (!pagep) {
1389 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1390 if (unlikely(!pagep)) {
1391 rx_stats(rxo)->rx_post_fail++;
1392 break;
1393 }
1394 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1395 0, adapter->big_page_size,
1396 DMA_FROM_DEVICE);
1397 page_info->page_offset = 0;
1398 } else {
1399 get_page(pagep);
1400 page_info->page_offset = page_offset + rx_frag_size;
1401 }
1402 page_offset = page_info->page_offset;
1403 page_info->page = pagep;
1404 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1405 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1406
1407 rxd = queue_head_node(rxq);
1408 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1409 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1410
1411 /* Any space left in the current big page for another frag? */
1412 if ((page_offset + rx_frag_size + rx_frag_size) >
1413 adapter->big_page_size) {
1414 pagep = NULL;
1415 page_info->last_page_user = true;
1416 }
1417
1418 prev_page_info = page_info;
1419 queue_head_inc(rxq);
1420 page_info = &page_info_tbl[rxq->head];
1421 }
1422 if (pagep)
1423 prev_page_info->last_page_user = true;
1424
1425 if (posted) {
1426 atomic_add(posted, &rxq->used);
1427 be_rxq_notify(adapter, rxq->id, posted);
1428 } else if (atomic_read(&rxq->used) == 0) {
1429 /* Let be_worker replenish when memory is available */
1430 rxo->rx_post_starved = true;
1431 }
1432 }
1433
1434 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1435 {
1436 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1437
1438 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1439 return NULL;
1440
1441 rmb();
1442 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1443
1444 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1445
1446 queue_tail_inc(tx_cq);
1447 return txcp;
1448 }
1449
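/* Unmap and free the skb whose transmission completed at last_index.
 * Returns the number of WRBs (header wrb included) consumed by that skb.
 */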
1450 static u16 be_tx_compl_process(struct be_adapter *adapter,
1451 struct be_tx_obj *txo, u16 last_index)
1452 {
1453 struct be_queue_info *txq = &txo->q;
1454 struct be_eth_wrb *wrb;
1455 struct sk_buff **sent_skbs = txo->sent_skb_list;
1456 struct sk_buff *sent_skb;
1457 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1458 bool unmap_skb_hdr = true;
1459
1460 sent_skb = sent_skbs[txq->tail];
1461 BUG_ON(!sent_skb);
1462 sent_skbs[txq->tail] = NULL;
1463
1464 /* skip header wrb */
1465 queue_tail_inc(txq);
1466
1467 do {
1468 cur_index = txq->tail;
1469 wrb = queue_tail_node(txq);
1470 unmap_tx_frag(&adapter->pdev->dev, wrb,
1471 (unmap_skb_hdr && skb_headlen(sent_skb)));
1472 unmap_skb_hdr = false;
1473
1474 num_wrbs++;
1475 queue_tail_inc(txq);
1476 } while (cur_index != last_index);
1477
1478 kfree_skb(sent_skb);
1479 return num_wrbs;
1480 }
1481
1482 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1483 {
1484 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1485
1486 if (!eqe->evt)
1487 return NULL;
1488
1489 rmb();
1490 eqe->evt = le32_to_cpu(eqe->evt);
1491 queue_tail_inc(&eq_obj->q);
1492 return eqe;
1493 }
1494
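/* Drain all pending entries from the event queue and notify the EQ
 * doorbell; re-arm on spurious interrupts (no events) and schedule NAPI
 * when events were found.
 */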
1495 static int event_handle(struct be_adapter *adapter,
1496 struct be_eq_obj *eq_obj,
1497 bool rearm)
1498 {
1499 struct be_eq_entry *eqe;
1500 u16 num = 0;
1501
1502 while ((eqe = event_get(eq_obj)) != NULL) {
1503 eqe->evt = 0;
1504 num++;
1505 }
1506
1507 /* Deal with any spurious interrupts that come
1508 * without events
1509 */
1510 if (!num)
1511 rearm = true;
1512
1513 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1514 if (num)
1515 napi_schedule(&eq_obj->napi);
1516
1517 return num;
1518 }
1519
1520 /* Just read and notify events without processing them.
1521 * Used at the time of destroying event queues */
1522 static void be_eq_clean(struct be_adapter *adapter,
1523 struct be_eq_obj *eq_obj)
1524 {
1525 struct be_eq_entry *eqe;
1526 u16 num = 0;
1527
1528 while ((eqe = event_get(eq_obj)) != NULL) {
1529 eqe->evt = 0;
1530 num++;
1531 }
1532
1533 if (num)
1534 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1535 }
1536
1537 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1538 {
1539 struct be_rx_page_info *page_info;
1540 struct be_queue_info *rxq = &rxo->q;
1541 struct be_queue_info *rx_cq = &rxo->cq;
1542 struct be_rx_compl_info *rxcp;
1543 u16 tail;
1544
1545 /* First clean up pending rx completions */
1546 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1547 be_rx_compl_discard(adapter, rxo, rxcp);
1548 be_cq_notify(adapter, rx_cq->id, false, 1);
1549 }
1550
1551 /* Then free posted rx buffers that were not used */
1552 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1553 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1554 page_info = get_rx_page_info(adapter, rxo, tail);
1555 put_page(page_info->page);
1556 memset(page_info, 0, sizeof(*page_info));
1557 }
1558 BUG_ON(atomic_read(&rxq->used));
1559 rxq->tail = rxq->head = 0;
1560 }
1561
1562 static void be_tx_compl_clean(struct be_adapter *adapter,
1563 struct be_tx_obj *txo)
1564 {
1565 struct be_queue_info *tx_cq = &txo->cq;
1566 struct be_queue_info *txq = &txo->q;
1567 struct be_eth_tx_compl *txcp;
1568 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1569 struct sk_buff **sent_skbs = txo->sent_skb_list;
1570 struct sk_buff *sent_skb;
1571 bool dummy_wrb;
1572
1573 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1574 do {
1575 while ((txcp = be_tx_compl_get(tx_cq))) {
1576 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1577 wrb_index, txcp);
1578 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1579 cmpl++;
1580 }
1581 if (cmpl) {
1582 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1583 atomic_sub(num_wrbs, &txq->used);
1584 cmpl = 0;
1585 num_wrbs = 0;
1586 }
1587
1588 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1589 break;
1590
1591 mdelay(1);
1592 } while (true);
1593
1594 if (atomic_read(&txq->used))
1595 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1596 atomic_read(&txq->used));
1597
1598 /* free posted tx for which compls will never arrive */
1599 while (atomic_read(&txq->used)) {
1600 sent_skb = sent_skbs[txq->tail];
1601 end_idx = txq->tail;
1602 index_adv(&end_idx,
1603 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1604 txq->len);
1605 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1606 atomic_sub(num_wrbs, &txq->used);
1607 }
1608 }
1609
1610 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1611 {
1612 struct be_queue_info *q;
1613
1614 q = &adapter->mcc_obj.q;
1615 if (q->created)
1616 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1617 be_queue_free(adapter, q);
1618
1619 q = &adapter->mcc_obj.cq;
1620 if (q->created)
1621 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1622 be_queue_free(adapter, q);
1623 }
1624
1625 /* Must be called only after TX qs are created, as MCC shares the TX EQ */
1626 static int be_mcc_queues_create(struct be_adapter *adapter)
1627 {
1628 struct be_queue_info *q, *cq;
1629
1630 /* Alloc MCC compl queue */
1631 cq = &adapter->mcc_obj.cq;
1632 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1633 sizeof(struct be_mcc_compl)))
1634 goto err;
1635
1636 /* Ask BE to create MCC compl queue; share TX's eq */
1637 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1638 goto mcc_cq_free;
1639
1640 /* Alloc MCC queue */
1641 q = &adapter->mcc_obj.q;
1642 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1643 goto mcc_cq_destroy;
1644
1645 /* Ask BE to create MCC queue */
1646 if (be_cmd_mccq_create(adapter, q, cq))
1647 goto mcc_q_free;
1648
1649 return 0;
1650
1651 mcc_q_free:
1652 be_queue_free(adapter, q);
1653 mcc_cq_destroy:
1654 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1655 mcc_cq_free:
1656 be_queue_free(adapter, cq);
1657 err:
1658 return -1;
1659 }
1660
1661 static void be_tx_queues_destroy(struct be_adapter *adapter)
1662 {
1663 struct be_queue_info *q;
1664 struct be_tx_obj *txo;
1665 u8 i;
1666
1667 for_all_tx_queues(adapter, txo, i) {
1668 q = &txo->q;
1669 if (q->created)
1670 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1671 be_queue_free(adapter, q);
1672
1673 q = &txo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677 }
1678
1679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
1682 q = &adapter->tx_eq.q;
1683 if (q->created)
1684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1685 be_queue_free(adapter, q);
1686 }
1687
1688 static int be_num_txqs_want(struct be_adapter *adapter)
1689 {
1690 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1691 lancer_chip(adapter) || !be_physfn(adapter) ||
1692 adapter->generation == BE_GEN2)
1693 return 1;
1694 else
1695 return MAX_TX_QS;
1696 }
1697
1698 /* One TX event queue is shared by all TX compl qs */
1699 static int be_tx_queues_create(struct be_adapter *adapter)
1700 {
1701 struct be_queue_info *eq, *q, *cq;
1702 struct be_tx_obj *txo;
1703 u8 i;
1704
1705 adapter->num_tx_qs = be_num_txqs_want(adapter);
1706 if (adapter->num_tx_qs != MAX_TX_QS) {
1707 rtnl_lock();
1708 netif_set_real_num_tx_queues(adapter->netdev,
1709 adapter->num_tx_qs);
1710 rtnl_unlock();
1711 }
1712
1713 adapter->tx_eq.max_eqd = 0;
1714 adapter->tx_eq.min_eqd = 0;
1715 adapter->tx_eq.cur_eqd = 96;
1716 adapter->tx_eq.enable_aic = false;
1717
1718 eq = &adapter->tx_eq.q;
1719 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1720 sizeof(struct be_eq_entry)))
1721 return -1;
1722
1723 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1724 goto err;
1725 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1726
1727 for_all_tx_queues(adapter, txo, i) {
1728 cq = &txo->cq;
1729 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1730 sizeof(struct be_eth_tx_compl)))
1731 goto err;
1732
1733 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1734 goto err;
1735
1736 q = &txo->q;
1737 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1738 sizeof(struct be_eth_wrb)))
1739 goto err;
1740 }
1741 return 0;
1742
1743 err:
1744 be_tx_queues_destroy(adapter);
1745 return -1;
1746 }
1747
1748 static void be_rx_queues_destroy(struct be_adapter *adapter)
1749 {
1750 struct be_queue_info *q;
1751 struct be_rx_obj *rxo;
1752 int i;
1753
1754 for_all_rx_queues(adapter, rxo, i) {
1755 be_queue_free(adapter, &rxo->q);
1756
1757 q = &rxo->cq;
1758 if (q->created)
1759 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1760 be_queue_free(adapter, q);
1761
1762 q = &rxo->rx_eq.q;
1763 if (q->created)
1764 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1765 be_queue_free(adapter, q);
1766 }
1767 }
1768
1769 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1770 {
1771 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1772 !sriov_enabled(adapter) && be_physfn(adapter) &&
1773 !be_is_mc(adapter)) {
1774 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1775 } else {
1776 dev_warn(&adapter->pdev->dev,
1777 "No support for multiple RX queues\n");
1778 return 1;
1779 }
1780 }
1781
1782 static int be_rx_queues_create(struct be_adapter *adapter)
1783 {
1784 struct be_queue_info *eq, *q, *cq;
1785 struct be_rx_obj *rxo;
1786 int rc, i;
1787
1788 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1789 msix_enabled(adapter) ?
1790 adapter->num_msix_vec - 1 : 1);
1791 if (adapter->num_rx_qs != MAX_RX_QS)
1792 dev_warn(&adapter->pdev->dev,
1793 "Can create only %d RX queues", adapter->num_rx_qs);
1794
1795 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1796 for_all_rx_queues(adapter, rxo, i) {
1797 rxo->adapter = adapter;
1798 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1799 rxo->rx_eq.enable_aic = true;
1800
1801 /* EQ */
1802 eq = &rxo->rx_eq.q;
1803 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1804 sizeof(struct be_eq_entry));
1805 if (rc)
1806 goto err;
1807
1808 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1809 if (rc)
1810 goto err;
1811
1812 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1813
1814 /* CQ */
1815 cq = &rxo->cq;
1816 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1817 sizeof(struct be_eth_rx_compl));
1818 if (rc)
1819 goto err;
1820
1821 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1822 if (rc)
1823 goto err;
1824
1825 /* Rx Q - will be created in be_open() */
1826 q = &rxo->q;
1827 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1828 sizeof(struct be_eth_rx_d));
1829 if (rc)
1830 goto err;
1831
1832 }
1833
1834 return 0;
1835 err:
1836 be_rx_queues_destroy(adapter);
1837 return -1;
1838 }
1839
1840 static bool event_peek(struct be_eq_obj *eq_obj)
1841 {
1842 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1843 if (!eqe->evt)
1844 return false;
1845 else
1846 return true;
1847 }
1848
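/* Legacy INTx handler. Lancer exposes no CEV_ISR register, so pending
 * events are detected by peeking at the TX and RX event queues directly;
 * BE2/BE3 read the ISR to find which EQs fired. IRQ_NONE is returned when
 * no work is found so shared/spurious interrupts are handled correctly.
 */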
1849 static irqreturn_t be_intx(int irq, void *dev)
1850 {
1851 struct be_adapter *adapter = dev;
1852 struct be_rx_obj *rxo;
1853 int isr, i, tx = 0, rx = 0;
1854
1855 if (lancer_chip(adapter)) {
1856 if (event_peek(&adapter->tx_eq))
1857 tx = event_handle(adapter, &adapter->tx_eq, false);
1858 for_all_rx_queues(adapter, rxo, i) {
1859 if (event_peek(&rxo->rx_eq))
1860 rx |= event_handle(adapter, &rxo->rx_eq, true);
1861 }
1862
1863 if (!(tx || rx))
1864 return IRQ_NONE;
1865
1866 } else {
1867 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1868 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1869 if (!isr)
1870 return IRQ_NONE;
1871
1872 if ((1 << adapter->tx_eq.eq_idx & isr))
1873 event_handle(adapter, &adapter->tx_eq, false);
1874
1875 for_all_rx_queues(adapter, rxo, i) {
1876 if ((1 << rxo->rx_eq.eq_idx & isr))
1877 event_handle(adapter, &rxo->rx_eq, true);
1878 }
1879 }
1880
1881 return IRQ_HANDLED;
1882 }
1883
1884 static irqreturn_t be_msix_rx(int irq, void *dev)
1885 {
1886 struct be_rx_obj *rxo = dev;
1887 struct be_adapter *adapter = rxo->adapter;
1888
1889 event_handle(adapter, &rxo->rx_eq, true);
1890
1891 return IRQ_HANDLED;
1892 }
1893
1894 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1895 {
1896 struct be_adapter *adapter = dev;
1897
1898 event_handle(adapter, &adapter->tx_eq, false);
1899
1900 return IRQ_HANDLED;
1901 }
1902
1903 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1904 {
1905 return (rxcp->tcpf && !rxcp->err) ? true : false;
1906 }
1907
1908 static int be_poll_rx(struct napi_struct *napi, int budget)
1909 {
1910 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1911 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1912 struct be_adapter *adapter = rxo->adapter;
1913 struct be_queue_info *rx_cq = &rxo->cq;
1914 struct be_rx_compl_info *rxcp;
1915 u32 work_done;
1916
1917 rx_stats(rxo)->rx_polls++;
1918 for (work_done = 0; work_done < budget; work_done++) {
1919 rxcp = be_rx_compl_get(rxo);
1920 if (!rxcp)
1921 break;
1922
1923 /* Is it a flush compl that has no data */
1924 if (unlikely(rxcp->num_rcvd == 0))
1925 goto loop_continue;
1926
1927 /* Discard compl with partial DMA on Lancer B0 */
1928 if (unlikely(!rxcp->pkt_size)) {
1929 be_rx_compl_discard(adapter, rxo, rxcp);
1930 goto loop_continue;
1931 }
1932
1933 /* On BE drop pkts that arrive due to imperfect filtering in
1934 * promiscuous mode on some SKUs
1935 */
1936 if (unlikely(rxcp->port != adapter->port_num &&
1937 !lancer_chip(adapter))) {
1938 be_rx_compl_discard(adapter, rxo, rxcp);
1939 goto loop_continue;
1940 }
1941
1942 if (do_gro(rxcp))
1943 be_rx_compl_process_gro(adapter, rxo, rxcp);
1944 else
1945 be_rx_compl_process(adapter, rxo, rxcp);
1946 loop_continue:
1947 be_rx_stats_update(rxo, rxcp);
1948 }
1949
1950 be_cq_notify(adapter, rx_cq->id, false, work_done);
1951
1952 /* Refill the queue */
1953 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1954 be_post_rx_frags(rxo, GFP_ATOMIC);
1955
1956 /* All consumed */
1957 if (work_done < budget) {
1958 napi_complete(napi);
1959 /* Arm CQ */
1960 be_cq_notify(adapter, rx_cq->id, true, 0);
1961 }
1962 return work_done;
1963 }
1964
1965 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1966 * For TX/MCC we don't honour budget; consume everything
1967 */
1968 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1969 {
1970 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1971 struct be_adapter *adapter =
1972 container_of(tx_eq, struct be_adapter, tx_eq);
1973 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1974 struct be_tx_obj *txo;
1975 struct be_eth_tx_compl *txcp;
1976 int tx_compl, mcc_compl, status = 0;
1977 u8 i;
1978 u16 num_wrbs;
1979
1980 for_all_tx_queues(adapter, txo, i) {
1981 tx_compl = 0;
1982 num_wrbs = 0;
1983 while ((txcp = be_tx_compl_get(&txo->cq))) {
1984 num_wrbs += be_tx_compl_process(adapter, txo,
1985 AMAP_GET_BITS(struct amap_eth_tx_compl,
1986 wrb_index, txcp));
1987 tx_compl++;
1988 }
1989 if (tx_compl) {
1990 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1991
1992 atomic_sub(num_wrbs, &txo->q.used);
1993
1994 /* As Tx wrbs have been freed up, wake up netdev queue
1995 * if it was stopped due to lack of tx wrbs. */
1996 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1997 atomic_read(&txo->q.used) < txo->q.len / 2) {
1998 netif_wake_subqueue(adapter->netdev, i);
1999 }
2000
2001 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2002 tx_stats(txo)->tx_compl += tx_compl;
2003 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2004 }
2005 }
2006
2007 mcc_compl = be_process_mcc(adapter, &status);
2008
2009 if (mcc_compl) {
2010 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2011 }
2012
2013 napi_complete(napi);
2014
2015 /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
2016 if (lancer_chip(adapter) && !msix_enabled(adapter)) {
2017 for_all_tx_queues(adapter, txo, i)
2018 be_cq_notify(adapter, txo->cq.id, true, 0);
2019
2020 be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
2021 }
2022
2023 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2024 adapter->drv_stats.tx_events++;
2025 return 1;
2026 }
2027
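/* Detect an unrecoverable error: on Lancer check the SLIPORT status
 * register for the error bit; on BE2/BE3 read the UE status CSRs from
 * PCI config space and log every unmasked bit that is set.
 */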
2028 void be_detect_dump_ue(struct be_adapter *adapter)
2029 {
2030 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2031 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2032 u32 i;
2033
2034 if (adapter->eeh_err || adapter->ue_detected)
2035 return;
2036
2037 if (lancer_chip(adapter)) {
2038 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2039 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2040 sliport_err1 = ioread32(adapter->db +
2041 SLIPORT_ERROR1_OFFSET);
2042 sliport_err2 = ioread32(adapter->db +
2043 SLIPORT_ERROR2_OFFSET);
2044 }
2045 } else {
2046 pci_read_config_dword(adapter->pdev,
2047 PCICFG_UE_STATUS_LOW, &ue_lo);
2048 pci_read_config_dword(adapter->pdev,
2049 PCICFG_UE_STATUS_HIGH, &ue_hi);
2050 pci_read_config_dword(adapter->pdev,
2051 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2052 pci_read_config_dword(adapter->pdev,
2053 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2054
2055 ue_lo = (ue_lo & (~ue_lo_mask));
2056 ue_hi = (ue_hi & (~ue_hi_mask));
2057 }
2058
2059 if (ue_lo || ue_hi ||
2060 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2061 adapter->ue_detected = true;
2062 adapter->eeh_err = true;
2063 dev_err(&adapter->pdev->dev,
2064 "Unrecoverable error in the card\n");
2065 }
2066
2067 if (ue_lo) {
2068 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2069 if (ue_lo & 1)
2070 dev_err(&adapter->pdev->dev,
2071 "UE: %s bit set\n", ue_status_low_desc[i]);
2072 }
2073 }
2074 if (ue_hi) {
2075 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2076 if (ue_hi & 1)
2077 dev_err(&adapter->pdev->dev,
2078 "UE: %s bit set\n", ue_status_hi_desc[i]);
2079 }
2080 }
2081
2082 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2083 dev_err(&adapter->pdev->dev,
2084 "sliport status 0x%x\n", sliport_status);
2085 dev_err(&adapter->pdev->dev,
2086 "sliport error1 0x%x\n", sliport_err1);
2087 dev_err(&adapter->pdev->dev,
2088 "sliport error2 0x%x\n", sliport_err2);
2089 }
2090 }
2091
2092 static void be_msix_disable(struct be_adapter *adapter)
2093 {
2094 if (msix_enabled(adapter)) {
2095 pci_disable_msix(adapter->pdev);
2096 adapter->num_msix_vec = 0;
2097 }
2098 }
2099
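/* Request one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector; if the full set cannot be granted, retry with the vector
 * count reported as available.
 */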
2100 static void be_msix_enable(struct be_adapter *adapter)
2101 {
2102 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2103 int i, status, num_vec;
2104
2105 num_vec = be_num_rxqs_want(adapter) + 1;
2106
2107 for (i = 0; i < num_vec; i++)
2108 adapter->msix_entries[i].entry = i;
2109
2110 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2111 if (status == 0) {
2112 goto done;
2113 } else if (status >= BE_MIN_MSIX_VECTORS) {
2114 num_vec = status;
2115 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2116 num_vec) == 0)
2117 goto done;
2118 }
2119 return;
2120 done:
2121 adapter->num_msix_vec = num_vec;
2122 return;
2123 }
2124
2125 static int be_sriov_enable(struct be_adapter *adapter)
2126 {
2127 be_check_sriov_fn_type(adapter);
2128
2129 #ifdef CONFIG_PCI_IOV
2130 if (be_physfn(adapter) && num_vfs) {
2131 int status, pos;
2132 u16 dev_vfs;
2133
2134 pos = pci_find_ext_capability(adapter->pdev,
2135 PCI_EXT_CAP_ID_SRIOV);
2136 pci_read_config_word(adapter->pdev,
2137 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2138
2139 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2140 if (adapter->num_vfs != num_vfs)
2141 dev_info(&adapter->pdev->dev,
2142 "Device supports %d VFs and not %d\n",
2143 adapter->num_vfs, num_vfs);
2144
2145 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2146 if (status)
2147 adapter->num_vfs = 0;
2148
2149 if (adapter->num_vfs) {
2150 adapter->vf_cfg = kcalloc(num_vfs,
2151 sizeof(struct be_vf_cfg),
2152 GFP_KERNEL);
2153 if (!adapter->vf_cfg)
2154 return -ENOMEM;
2155 }
2156 }
2157 #endif
2158 return 0;
2159 }
2160
2161 static void be_sriov_disable(struct be_adapter *adapter)
2162 {
2163 #ifdef CONFIG_PCI_IOV
2164 if (sriov_enabled(adapter)) {
2165 pci_disable_sriov(adapter->pdev);
2166 kfree(adapter->vf_cfg);
2167 adapter->num_vfs = 0;
2168 }
2169 #endif
2170 }
2171
2172 static inline int be_msix_vec_get(struct be_adapter *adapter,
2173 struct be_eq_obj *eq_obj)
2174 {
2175 return adapter->msix_entries[eq_obj->eq_idx].vector;
2176 }
2177
2178 static int be_request_irq(struct be_adapter *adapter,
2179 struct be_eq_obj *eq_obj,
2180 void *handler, char *desc, void *context)
2181 {
2182 struct net_device *netdev = adapter->netdev;
2183 int vec;
2184
2185 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2186 vec = be_msix_vec_get(adapter, eq_obj);
2187 return request_irq(vec, handler, 0, eq_obj->desc, context);
2188 }
2189
2190 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2191 void *context)
2192 {
2193 int vec = be_msix_vec_get(adapter, eq_obj);
2194 free_irq(vec, context);
2195 }
2196
2197 static int be_msix_register(struct be_adapter *adapter)
2198 {
2199 struct be_rx_obj *rxo;
2200 int status, i;
2201 char qname[10];
2202
2203 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2204 adapter);
2205 if (status)
2206 goto err;
2207
2208 for_all_rx_queues(adapter, rxo, i) {
2209 sprintf(qname, "rxq%d", i);
2210 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2211 qname, rxo);
2212 if (status)
2213 goto err_msix;
2214 }
2215
2216 return 0;
2217
2218 err_msix:
2219 be_free_irq(adapter, &adapter->tx_eq, adapter);
2220
2221 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2222 be_free_irq(adapter, &rxo->rx_eq, rxo);
2223
2224 err:
2225 dev_warn(&adapter->pdev->dev,
2226 "MSIX Request IRQ failed - err %d\n", status);
2227 be_msix_disable(adapter);
2228 return status;
2229 }
2230
2231 static int be_irq_register(struct be_adapter *adapter)
2232 {
2233 struct net_device *netdev = adapter->netdev;
2234 int status;
2235
2236 if (msix_enabled(adapter)) {
2237 status = be_msix_register(adapter);
2238 if (status == 0)
2239 goto done;
2240 /* INTx is not supported for VF */
2241 if (!be_physfn(adapter))
2242 return status;
2243 }
2244
2245 /* INTx */
2246 netdev->irq = adapter->pdev->irq;
2247 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2248 adapter);
2249 if (status) {
2250 dev_err(&adapter->pdev->dev,
2251 "INTx request IRQ failed - err %d\n", status);
2252 return status;
2253 }
2254 done:
2255 adapter->isr_registered = true;
2256 return 0;
2257 }
2258
2259 static void be_irq_unregister(struct be_adapter *adapter)
2260 {
2261 struct net_device *netdev = adapter->netdev;
2262 struct be_rx_obj *rxo;
2263 int i;
2264
2265 if (!adapter->isr_registered)
2266 return;
2267
2268 /* INTx */
2269 if (!msix_enabled(adapter)) {
2270 free_irq(netdev->irq, adapter);
2271 goto done;
2272 }
2273
2274 /* MSIx */
2275 be_free_irq(adapter, &adapter->tx_eq, adapter);
2276
2277 for_all_rx_queues(adapter, rxo, i)
2278 be_free_irq(adapter, &rxo->rx_eq, rxo);
2279
2280 done:
2281 adapter->isr_registered = false;
2282 }
2283
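/* Destroy the RX rings in hardware and drain residual completions and
 * events so that a subsequent open starts from a clean state.
 */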
2284 static void be_rx_queues_clear(struct be_adapter *adapter)
2285 {
2286 struct be_queue_info *q;
2287 struct be_rx_obj *rxo;
2288 int i;
2289
2290 for_all_rx_queues(adapter, rxo, i) {
2291 q = &rxo->q;
2292 if (q->created) {
2293 be_cmd_rxq_destroy(adapter, q);
2294 /* After the rxq is invalidated, wait for a grace time
2295 * of 1ms for all dma to end and the flush compl to
2296 * arrive
2297 */
2298 mdelay(1);
2299 be_rx_q_clean(adapter, rxo);
2300 }
2301
2302 /* Clear any residual events */
2303 q = &rxo->rx_eq.q;
2304 if (q->created)
2305 be_eq_clean(adapter, &rxo->rx_eq);
2306 }
2307 }
2308
2309 static int be_close(struct net_device *netdev)
2310 {
2311 struct be_adapter *adapter = netdev_priv(netdev);
2312 struct be_rx_obj *rxo;
2313 struct be_tx_obj *txo;
2314 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2315 int vec, i;
2316
2317 be_async_mcc_disable(adapter);
2318
2319 if (!lancer_chip(adapter))
2320 be_intr_set(adapter, false);
2321
2322 for_all_rx_queues(adapter, rxo, i)
2323 napi_disable(&rxo->rx_eq.napi);
2324
2325 napi_disable(&tx_eq->napi);
2326
2327 if (lancer_chip(adapter)) {
2328 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2329 for_all_rx_queues(adapter, rxo, i)
2330 be_cq_notify(adapter, rxo->cq.id, false, 0);
2331 for_all_tx_queues(adapter, txo, i)
2332 be_cq_notify(adapter, txo->cq.id, false, 0);
2333 }
2334
2335 if (msix_enabled(adapter)) {
2336 vec = be_msix_vec_get(adapter, tx_eq);
2337 synchronize_irq(vec);
2338
2339 for_all_rx_queues(adapter, rxo, i) {
2340 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2341 synchronize_irq(vec);
2342 }
2343 } else {
2344 synchronize_irq(netdev->irq);
2345 }
2346 be_irq_unregister(adapter);
2347
2348 /* Wait for all pending tx completions to arrive so that
2349 * all tx skbs are freed.
2350 */
2351 for_all_tx_queues(adapter, txo, i)
2352 be_tx_compl_clean(adapter, txo);
2353
2354 be_rx_queues_clear(adapter);
2355 return 0;
2356 }
2357
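/* Create the RX rings in hardware; when multiple RX queues are in use,
 * spread the RSS queue ids across the 128-entry indirection table, then
 * post the initial receive buffers and enable NAPI.
 */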
2358 static int be_rx_queues_setup(struct be_adapter *adapter)
2359 {
2360 struct be_rx_obj *rxo;
2361 int rc, i, j;
2362 u8 rsstable[128];
2363
2364 for_all_rx_queues(adapter, rxo, i) {
2365 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2366 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2367 adapter->if_handle,
2368 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2369 if (rc)
2370 return rc;
2371 }
2372
2373 if (be_multi_rxq(adapter)) {
2374 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2375 for_all_rss_queues(adapter, rxo, i) {
2376 if ((j + i) >= 128)
2377 break;
2378 rsstable[j + i] = rxo->rss_id;
2379 }
2380 }
2381 rc = be_cmd_rss_config(adapter, rsstable, 128);
2382
2383 if (rc)
2384 return rc;
2385 }
2386
2387 /* First time posting */
2388 for_all_rx_queues(adapter, rxo, i) {
2389 be_post_rx_frags(rxo, GFP_KERNEL);
2390 napi_enable(&rxo->rx_eq.napi);
2391 }
2392 return 0;
2393 }
2394
2395 static int be_open(struct net_device *netdev)
2396 {
2397 struct be_adapter *adapter = netdev_priv(netdev);
2398 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2399 struct be_rx_obj *rxo;
2400 int status, i;
2401
2402 status = be_rx_queues_setup(adapter);
2403 if (status)
2404 goto err;
2405
2406 napi_enable(&tx_eq->napi);
2407
2408 be_irq_register(adapter);
2409
2410 if (!lancer_chip(adapter))
2411 be_intr_set(adapter, true);
2412
2413 /* The evt queues are created in unarmed state; arm them */
2414 for_all_rx_queues(adapter, rxo, i) {
2415 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2416 be_cq_notify(adapter, rxo->cq.id, true, 0);
2417 }
2418 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2419
2420 /* Now that interrupts are on we can process async mcc */
2421 be_async_mcc_enable(adapter);
2422
2423 return 0;
2424 err:
2425 be_close(adapter->netdev);
2426 return -EIO;
2427 }
2428
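/* Enable or disable Wake-on-LAN: program the magic-packet filter in FW
 * and set the PCI PM wake state for D3hot/D3cold accordingly.
 */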
2429 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2430 {
2431 struct be_dma_mem cmd;
2432 int status = 0;
2433 u8 mac[ETH_ALEN];
2434
2435 memset(mac, 0, ETH_ALEN);
2436
2437 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2438 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2439 GFP_KERNEL);
2440 if (cmd.va == NULL)
2441 return -1;
2442 memset(cmd.va, 0, cmd.size);
2443
2444 if (enable) {
2445 status = pci_write_config_dword(adapter->pdev,
2446 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2447 if (status) {
2448 dev_err(&adapter->pdev->dev,
2449 "Could not enable Wake-on-lan\n");
2450 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2451 cmd.dma);
2452 return status;
2453 }
2454 status = be_cmd_enable_magic_wol(adapter,
2455 adapter->netdev->dev_addr, &cmd);
2456 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2457 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2458 } else {
2459 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2460 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2461 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2462 }
2463
2464 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2465 return status;
2466 }
2467
2468 /*
2469 * Generate a seed MAC address from the PF MAC address using jhash.
2470 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2471 * These addresses are programmed in the ASIC by the PF and the VF driver
2472 * queries for the MAC address during its probe.
2473 */
2474 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2475 {
2476 u32 vf;
2477 int status = 0;
2478 u8 mac[ETH_ALEN];
2479 struct be_vf_cfg *vf_cfg;
2480
2481 be_vf_eth_addr_generate(adapter, mac);
2482
2483 for_all_vfs(adapter, vf_cfg, vf) {
2484 if (lancer_chip(adapter)) {
2485 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2486 } else {
2487 status = be_cmd_pmac_add(adapter, mac,
2488 vf_cfg->if_handle,
2489 &vf_cfg->pmac_id, vf + 1);
2490 }
2491
2492 if (status)
2493 dev_err(&adapter->pdev->dev,
2494 "Mac address assignment failed for VF %d\n", vf);
2495 else
2496 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2497
2498 mac[5] += 1;
2499 }
2500 return status;
2501 }
2502
2503 static void be_vf_clear(struct be_adapter *adapter)
2504 {
2505 struct be_vf_cfg *vf_cfg;
2506 u32 vf;
2507
2508 for_all_vfs(adapter, vf_cfg, vf) {
2509 if (lancer_chip(adapter))
2510 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2511 else
2512 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2513 vf_cfg->pmac_id, vf + 1);
2514
2515 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2516 }
2517 }
2518
2519 static int be_clear(struct be_adapter *adapter)
2520 {
2521 if (sriov_enabled(adapter))
2522 be_vf_clear(adapter);
2523
2524 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2525
2526 be_mcc_queues_destroy(adapter);
2527 be_rx_queues_destroy(adapter);
2528 be_tx_queues_destroy(adapter);
2529
2530 /* tell fw we're done with firing cmds */
2531 be_cmd_fw_clean(adapter);
2532 return 0;
2533 }
2534
2535 static void be_vf_setup_init(struct be_adapter *adapter)
2536 {
2537 struct be_vf_cfg *vf_cfg;
2538 int vf;
2539
2540 for_all_vfs(adapter, vf_cfg, vf) {
2541 vf_cfg->if_handle = -1;
2542 vf_cfg->pmac_id = -1;
2543 }
2544 }
2545
2546 static int be_vf_setup(struct be_adapter *adapter)
2547 {
2548 struct be_vf_cfg *vf_cfg;
2549 u32 cap_flags, en_flags, vf;
2550 u16 lnk_speed;
2551 int status;
2552
2553 be_vf_setup_init(adapter);
2554
2555 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2556 BE_IF_FLAGS_MULTICAST;
2557 for_all_vfs(adapter, vf_cfg, vf) {
2558 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2559 &vf_cfg->if_handle, NULL, vf + 1);
2560 if (status)
2561 goto err;
2562 }
2563
2564 status = be_vf_eth_addr_config(adapter);
2565 if (status)
2566 goto err;
2567
2568 for_all_vfs(adapter, vf_cfg, vf) {
2569 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2570 vf + 1);
2571 if (status)
2572 goto err;
2573 vf_cfg->tx_rate = lnk_speed * 10;
2574 }
2575 return 0;
2576 err:
2577 return status;
2578 }
2579
2580 static void be_setup_init(struct be_adapter *adapter)
2581 {
2582 adapter->vlan_prio_bmap = 0xff;
2583 adapter->link_speed = -1;
2584 adapter->if_handle = -1;
2585 adapter->be3_native = false;
2586 adapter->promiscuous = false;
2587 adapter->eq_next_idx = 0;
2588 }
2589
2590 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2591 {
2592 u32 pmac_id;
2593 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2594 if (status != 0)
2595 goto do_none;
2596 status = be_cmd_mac_addr_query(adapter, mac,
2597 MAC_ADDRESS_TYPE_NETWORK,
2598 false, adapter->if_handle, pmac_id);
2599 if (status != 0)
2600 goto do_none;
2601 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2602 &adapter->pmac_id, 0);
2603 do_none:
2604 return status;
2605 }
2606
2607 static int be_setup(struct be_adapter *adapter)
2608 {
2609 struct net_device *netdev = adapter->netdev;
2610 u32 cap_flags, en_flags;
2611 u32 tx_fc, rx_fc;
2612 int status, i;
2613 u8 mac[ETH_ALEN];
2614 struct be_tx_obj *txo;
2615
2616 be_setup_init(adapter);
2617
2618 be_cmd_req_native_mode(adapter);
2619
2620 status = be_tx_queues_create(adapter);
2621 if (status != 0)
2622 goto err;
2623
2624 status = be_rx_queues_create(adapter);
2625 if (status != 0)
2626 goto err;
2627
2628 status = be_mcc_queues_create(adapter);
2629 if (status != 0)
2630 goto err;
2631
2632 memset(mac, 0, ETH_ALEN);
2633 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2634 true /*permanent */, 0, 0);
2635 if (status)
2636 return status;
2637 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2638 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2639
2640 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2641 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2642 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2643 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2644
2645 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2646 cap_flags |= BE_IF_FLAGS_RSS;
2647 en_flags |= BE_IF_FLAGS_RSS;
2648 }
2649 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2650 netdev->dev_addr, &adapter->if_handle,
2651 &adapter->pmac_id, 0);
2652 if (status != 0)
2653 goto err;
2654
2655 for_all_tx_queues(adapter, txo, i) {
2656 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2657 if (status)
2658 goto err;
2659 }
2660
2661 /* The VF's permanent MAC queried from the card is incorrect.
2662 * For BEx: Query the MAC configured by the PF using if_handle.
2663 * For Lancer: Get and use mac_list to obtain the MAC address.
2664 */
2665 if (!be_physfn(adapter)) {
2666 if (lancer_chip(adapter))
2667 status = be_configure_mac_from_list(adapter, mac);
2668 else
2669 status = be_cmd_mac_addr_query(adapter, mac,
2670 MAC_ADDRESS_TYPE_NETWORK, false,
2671 adapter->if_handle, 0);
2672 if (!status) {
2673 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2674 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2675 }
2676 }
2677
2678 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2679
2680 status = be_vid_config(adapter, false, 0);
2681 if (status)
2682 goto err;
2683
2684 be_set_rx_mode(adapter->netdev);
2685
2686 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2687 /* For Lancer: It is legal for this cmd to fail on VF */
2688 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2689 goto err;
2690
2691 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2692 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2693 adapter->rx_fc);
2694 /* For Lancer: It is legal for this cmd to fail on VF */
2695 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2696 goto err;
2697 }
2698
2699 pcie_set_readrq(adapter->pdev, 4096);
2700
2701 if (sriov_enabled(adapter)) {
2702 status = be_vf_setup(adapter);
2703 if (status)
2704 goto err;
2705 }
2706
2707 return 0;
2708 err:
2709 be_clear(adapter);
2710 return status;
2711 }
2712
2713 #ifdef CONFIG_NET_POLL_CONTROLLER
2714 static void be_netpoll(struct net_device *netdev)
2715 {
2716 struct be_adapter *adapter = netdev_priv(netdev);
2717 struct be_rx_obj *rxo;
2718 int i;
2719
2720 event_handle(adapter, &adapter->tx_eq, false);
2721 for_all_rx_queues(adapter, rxo, i)
2722 event_handle(adapter, &rxo->rx_eq, true);
2723 }
2724 #endif
2725
2726 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2727 static bool be_flash_redboot(struct be_adapter *adapter,
2728 const u8 *p, u32 img_start, int image_size,
2729 int hdr_size)
2730 {
2731 u32 crc_offset;
2732 u8 flashed_crc[4];
2733 int status;
2734
2735 crc_offset = hdr_size + img_start + image_size - 4;
2736
2737 p += crc_offset;
2738
2739 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2740 (image_size - 4));
2741 if (status) {
2742 dev_err(&adapter->pdev->dev,
2743 "could not get crc from flash, not flashing redboot\n");
2744 return false;
2745 }
2746
2747 /* update redboot only if crc does not match */
2748 if (!memcmp(flashed_crc, p, 4))
2749 return false;
2750 else
2751 return true;
2752 }
2753
2754 static bool phy_flashing_required(struct be_adapter *adapter)
2755 {
2756 int status = 0;
2757 struct be_phy_info phy_info;
2758
2759 status = be_cmd_get_phy_info(adapter, &phy_info);
2760 if (status)
2761 return false;
2762 if ((phy_info.phy_type == TN_8022) &&
2763 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2764 return true;
2765 }
2766 return false;
2767 }
2768
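/* Flash each component of the UFI image: pick the gen2 or gen3 layout
 * table, skip components that do not apply (NCSI on older FW, PHY FW
 * when not required, redboot when the CRC already matches) and write
 * the rest to the flashrom in 32KB chunks.
 */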
2769 static int be_flash_data(struct be_adapter *adapter,
2770 const struct firmware *fw,
2771 struct be_dma_mem *flash_cmd, int num_of_images)
2772
2773 {
2774 int status = 0, i, filehdr_size = 0;
2775 u32 total_bytes = 0, flash_op;
2776 int num_bytes;
2777 const u8 *p = fw->data;
2778 struct be_cmd_write_flashrom *req = flash_cmd->va;
2779 const struct flash_comp *pflashcomp;
2780 int num_comp;
2781
2782 static const struct flash_comp gen3_flash_types[10] = {
2783 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2784 FLASH_IMAGE_MAX_SIZE_g3},
2785 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2786 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2787 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2788 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2789 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2790 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2791 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2792 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2793 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2794 FLASH_IMAGE_MAX_SIZE_g3},
2795 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2796 FLASH_IMAGE_MAX_SIZE_g3},
2797 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2798 FLASH_IMAGE_MAX_SIZE_g3},
2799 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2800 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2801 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2802 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2803 };
2804 static const struct flash_comp gen2_flash_types[8] = {
2805 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2806 FLASH_IMAGE_MAX_SIZE_g2},
2807 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2808 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2809 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2810 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2811 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2812 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2813 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2814 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2815 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2816 FLASH_IMAGE_MAX_SIZE_g2},
2817 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2818 FLASH_IMAGE_MAX_SIZE_g2},
2819 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2820 FLASH_IMAGE_MAX_SIZE_g2}
2821 };
2822
2823 if (adapter->generation == BE_GEN3) {
2824 pflashcomp = gen3_flash_types;
2825 filehdr_size = sizeof(struct flash_file_hdr_g3);
2826 num_comp = ARRAY_SIZE(gen3_flash_types);
2827 } else {
2828 pflashcomp = gen2_flash_types;
2829 filehdr_size = sizeof(struct flash_file_hdr_g2);
2830 num_comp = ARRAY_SIZE(gen2_flash_types);
2831 }
2832 for (i = 0; i < num_comp; i++) {
2833 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2834 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2835 continue;
2836 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2837 if (!phy_flashing_required(adapter))
2838 continue;
2839 }
2840 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2841 (!be_flash_redboot(adapter, fw->data,
2842 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2843 (num_of_images * sizeof(struct image_hdr)))))
2844 continue;
2845 p = fw->data;
2846 p += filehdr_size + pflashcomp[i].offset
2847 + (num_of_images * sizeof(struct image_hdr));
2848 if (p + pflashcomp[i].size > fw->data + fw->size)
2849 return -1;
2850 total_bytes = pflashcomp[i].size;
2851 while (total_bytes) {
2852 if (total_bytes > 32*1024)
2853 num_bytes = 32*1024;
2854 else
2855 num_bytes = total_bytes;
2856 total_bytes -= num_bytes;
2857 if (!total_bytes) {
2858 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2859 flash_op = FLASHROM_OPER_PHY_FLASH;
2860 else
2861 flash_op = FLASHROM_OPER_FLASH;
2862 } else {
2863 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2864 flash_op = FLASHROM_OPER_PHY_SAVE;
2865 else
2866 flash_op = FLASHROM_OPER_SAVE;
2867 }
2868 memcpy(req->params.data_buf, p, num_bytes);
2869 p += num_bytes;
2870 status = be_cmd_write_flashrom(adapter, flash_cmd,
2871 pflashcomp[i].optype, flash_op, num_bytes);
2872 if (status) {
2873 if ((status == ILLEGAL_IOCTL_REQ) &&
2874 (pflashcomp[i].optype ==
2875 IMG_TYPE_PHY_FW))
2876 break;
2877 dev_err(&adapter->pdev->dev,
2878 "cmd to write to flash rom failed.\n");
2879 return -1;
2880 }
2881 }
2882 }
2883 return 0;
2884 }
2885
2886 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2887 {
2888 if (fhdr == NULL)
2889 return 0;
2890 if (fhdr->build[0] == '3')
2891 return BE_GEN3;
2892 else if (fhdr->build[0] == '2')
2893 return BE_GEN2;
2894 else
2895 return 0;
2896 }
2897
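/* Download firmware to a Lancer chip: write the image in 32KB chunks
 * with the WRITE_OBJECT command and commit it with a final zero-length
 * write.
 */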
2898 static int lancer_fw_download(struct be_adapter *adapter,
2899 const struct firmware *fw)
2900 {
2901 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2902 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2903 struct be_dma_mem flash_cmd;
2904 const u8 *data_ptr = NULL;
2905 u8 *dest_image_ptr = NULL;
2906 size_t image_size = 0;
2907 u32 chunk_size = 0;
2908 u32 data_written = 0;
2909 u32 offset = 0;
2910 int status = 0;
2911 u8 add_status = 0;
2912
2913 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2914 dev_err(&adapter->pdev->dev,
2915 "FW Image not properly aligned. "
2916 "Length must be 4 byte aligned.\n");
2917 status = -EINVAL;
2918 goto lancer_fw_exit;
2919 }
2920
2921 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2922 + LANCER_FW_DOWNLOAD_CHUNK;
2923 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2924 &flash_cmd.dma, GFP_KERNEL);
2925 if (!flash_cmd.va) {
2926 status = -ENOMEM;
2927 dev_err(&adapter->pdev->dev,
2928 "Memory allocation failure while flashing\n");
2929 goto lancer_fw_exit;
2930 }
2931
2932 dest_image_ptr = flash_cmd.va +
2933 sizeof(struct lancer_cmd_req_write_object);
2934 image_size = fw->size;
2935 data_ptr = fw->data;
2936
2937 while (image_size) {
2938 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2939
2940 /* Copy the image chunk content. */
2941 memcpy(dest_image_ptr, data_ptr, chunk_size);
2942
2943 status = lancer_cmd_write_object(adapter, &flash_cmd,
2944 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2945 &data_written, &add_status);
2946
2947 if (status)
2948 break;
2949
2950 offset += data_written;
2951 data_ptr += data_written;
2952 image_size -= data_written;
2953 }
2954
2955 if (!status) {
2956 /* Commit the FW written */
2957 status = lancer_cmd_write_object(adapter, &flash_cmd,
2958 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2959 &data_written, &add_status);
2960 }
2961
2962 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2963 flash_cmd.dma);
2964 if (status) {
2965 dev_err(&adapter->pdev->dev,
2966 "Firmware load error. "
2967 "Status code: 0x%x Additional Status: 0x%x\n",
2968 status, add_status);
2969 goto lancer_fw_exit;
2970 }
2971
2972 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2973 lancer_fw_exit:
2974 return status;
2975 }
2976
2977 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2978 {
2979 struct flash_file_hdr_g2 *fhdr;
2980 struct flash_file_hdr_g3 *fhdr3;
2981 struct image_hdr *img_hdr_ptr = NULL;
2982 struct be_dma_mem flash_cmd;
2983 const u8 *p;
2984 int status = 0, i = 0, num_imgs = 0;
2985
2986 p = fw->data;
2987 fhdr = (struct flash_file_hdr_g2 *) p;
2988
2989 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2990 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2991 &flash_cmd.dma, GFP_KERNEL);
2992 if (!flash_cmd.va) {
2993 status = -ENOMEM;
2994 dev_err(&adapter->pdev->dev,
2995 "Memory allocation failure while flashing\n");
2996 goto be_fw_exit;
2997 }
2998
2999 if ((adapter->generation == BE_GEN3) &&
3000 (get_ufigen_type(fhdr) == BE_GEN3)) {
3001 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3002 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3003 for (i = 0; i < num_imgs; i++) {
3004 img_hdr_ptr = (struct image_hdr *) (fw->data +
3005 (sizeof(struct flash_file_hdr_g3) +
3006 i * sizeof(struct image_hdr)));
3007 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3008 status = be_flash_data(adapter, fw, &flash_cmd,
3009 num_imgs);
3010 }
3011 } else if ((adapter->generation == BE_GEN2) &&
3012 (get_ufigen_type(fhdr) == BE_GEN2)) {
3013 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3014 } else {
3015 dev_err(&adapter->pdev->dev,
3016 "UFI and Interface are not compatible for flashing\n");
3017 status = -1;
3018 }
3019
3020 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3021 flash_cmd.dma);
3022 if (status) {
3023 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3024 goto be_fw_exit;
3025 }
3026
3027 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3028
3029 be_fw_exit:
3030 return status;
3031 }
3032
3033 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3034 {
3035 const struct firmware *fw;
3036 int status;
3037
3038 if (!netif_running(adapter->netdev)) {
3039 dev_err(&adapter->pdev->dev,
3040 "Firmware load not allowed (interface is down)\n");
3041 return -1;
3042 }
3043
3044 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3045 if (status)
3046 goto fw_exit;
3047
3048 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3049
3050 if (lancer_chip(adapter))
3051 status = lancer_fw_download(adapter, fw);
3052 else
3053 status = be_fw_download(adapter, fw);
3054
3055 fw_exit:
3056 release_firmware(fw);
3057 return status;
3058 }
3059
3060 static struct net_device_ops be_netdev_ops = {
3061 .ndo_open = be_open,
3062 .ndo_stop = be_close,
3063 .ndo_start_xmit = be_xmit,
3064 .ndo_set_rx_mode = be_set_rx_mode,
3065 .ndo_set_mac_address = be_mac_addr_set,
3066 .ndo_change_mtu = be_change_mtu,
3067 .ndo_get_stats64 = be_get_stats64,
3068 .ndo_validate_addr = eth_validate_addr,
3069 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3070 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3071 .ndo_set_vf_mac = be_set_vf_mac,
3072 .ndo_set_vf_vlan = be_set_vf_vlan,
3073 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3074 .ndo_get_vf_config = be_get_vf_config,
3075 #ifdef CONFIG_NET_POLL_CONTROLLER
3076 .ndo_poll_controller = be_netpoll,
3077 #endif
3078 };
3079
3080 static void be_netdev_init(struct net_device *netdev)
3081 {
3082 struct be_adapter *adapter = netdev_priv(netdev);
3083 struct be_rx_obj *rxo;
3084 int i;
3085
3086 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3087 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3088 NETIF_F_HW_VLAN_TX;
3089 if (be_multi_rxq(adapter))
3090 netdev->hw_features |= NETIF_F_RXHASH;
3091
3092 netdev->features |= netdev->hw_features |
3093 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3094
3095 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3096 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3097
3098 netdev->flags |= IFF_MULTICAST;
3099
3100 netif_set_gso_max_size(netdev, 65535);
3101
3102 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3103
3104 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3105
3106 for_all_rx_queues(adapter, rxo, i)
3107 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3108 BE_NAPI_WEIGHT);
3109
3110 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3111 BE_NAPI_WEIGHT);
3112 }
3113
3114 static void be_unmap_pci_bars(struct be_adapter *adapter)
3115 {
3116 if (adapter->csr)
3117 iounmap(adapter->csr);
3118 if (adapter->db)
3119 iounmap(adapter->db);
3120 }
3121
3122 static int be_map_pci_bars(struct be_adapter *adapter)
3123 {
3124 u8 __iomem *addr;
3125 int db_reg;
3126
3127 if (lancer_chip(adapter)) {
3128 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3129 pci_resource_len(adapter->pdev, 0));
3130 if (addr == NULL)
3131 return -ENOMEM;
3132 adapter->db = addr;
3133 return 0;
3134 }
3135
3136 if (be_physfn(adapter)) {
3137 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3138 pci_resource_len(adapter->pdev, 2));
3139 if (addr == NULL)
3140 return -ENOMEM;
3141 adapter->csr = addr;
3142 }
3143
3144 if (adapter->generation == BE_GEN2) {
3145 db_reg = 4;
3146 } else {
3147 if (be_physfn(adapter))
3148 db_reg = 4;
3149 else
3150 db_reg = 0;
3151 }
3152 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3153 pci_resource_len(adapter->pdev, db_reg));
3154 if (addr == NULL)
3155 goto pci_map_err;
3156 adapter->db = addr;
3157
3158 return 0;
3159 pci_map_err:
3160 be_unmap_pci_bars(adapter);
3161 return -ENOMEM;
3162 }
3163
3164
3165 static void be_ctrl_cleanup(struct be_adapter *adapter)
3166 {
3167 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3168
3169 be_unmap_pci_bars(adapter);
3170
3171 if (mem->va)
3172 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3173 mem->dma);
3174
3175 mem = &adapter->rx_filter;
3176 if (mem->va)
3177 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3178 mem->dma);
3179 }
3180
3181 static int be_ctrl_init(struct be_adapter *adapter)
3182 {
3183 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3184 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3185 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3186 int status;
3187
3188 status = be_map_pci_bars(adapter);
3189 if (status)
3190 goto done;
3191
3192 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3193 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3194 mbox_mem_alloc->size,
3195 &mbox_mem_alloc->dma,
3196 GFP_KERNEL);
3197 if (!mbox_mem_alloc->va) {
3198 status = -ENOMEM;
3199 goto unmap_pci_bars;
3200 }
3201 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3202 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3203 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3204 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3205
3206 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3207 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3208 &rx_filter->dma, GFP_KERNEL);
3209 if (rx_filter->va == NULL) {
3210 status = -ENOMEM;
3211 goto free_mbox;
3212 }
3213 memset(rx_filter->va, 0, rx_filter->size);
3214
3215 mutex_init(&adapter->mbox_lock);
3216 spin_lock_init(&adapter->mcc_lock);
3217 spin_lock_init(&adapter->mcc_cq_lock);
3218
3219 init_completion(&adapter->flash_compl);
3220 pci_save_state(adapter->pdev);
3221 return 0;
3222
3223 free_mbox:
3224 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3225 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3226
3227 unmap_pci_bars:
3228 be_unmap_pci_bars(adapter);
3229
3230 done:
3231 return status;
3232 }
3233
3234 static void be_stats_cleanup(struct be_adapter *adapter)
3235 {
3236 struct be_dma_mem *cmd = &adapter->stats_cmd;
3237
3238 if (cmd->va)
3239 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3240 cmd->va, cmd->dma);
3241 }
3242
3243 static int be_stats_init(struct be_adapter *adapter)
3244 {
3245 struct be_dma_mem *cmd = &adapter->stats_cmd;
3246
3247 if (adapter->generation == BE_GEN2) {
3248 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3249 } else {
3250 if (lancer_chip(adapter))
3251 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3252 else
3253 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3254 }
3255 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3256 GFP_KERNEL);
3257 if (cmd->va == NULL)
3258 return -1;
3259 memset(cmd->va, 0, cmd->size);
3260 return 0;
3261 }
3262
3263 static void __devexit be_remove(struct pci_dev *pdev)
3264 {
3265 struct be_adapter *adapter = pci_get_drvdata(pdev);
3266
3267 if (!adapter)
3268 return;
3269
3270 cancel_delayed_work_sync(&adapter->work);
3271
3272 unregister_netdev(adapter->netdev);
3273
3274 be_clear(adapter);
3275
3276 be_stats_cleanup(adapter);
3277
3278 be_ctrl_cleanup(adapter);
3279
3280 be_sriov_disable(adapter);
3281
3282 be_msix_disable(adapter);
3283
3284 pci_set_drvdata(pdev, NULL);
3285 pci_release_regions(pdev);
3286 pci_disable_device(pdev);
3287
3288 free_netdev(adapter->netdev);
3289 }
3290
3291 static int be_get_config(struct be_adapter *adapter)
3292 {
3293 int status;
3294
3295 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3296 &adapter->function_mode, &adapter->function_caps);
3297 if (status)
3298 return status;
3299
3300 if (adapter->function_mode & FLEX10_MODE)
3301 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3302 else
3303 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3304
3305 status = be_cmd_get_cntl_attributes(adapter);
3306 if (status)
3307 return status;
3308
3309 return 0;
3310 }
3311
3312 static int be_dev_family_check(struct be_adapter *adapter)
3313 {
3314 struct pci_dev *pdev = adapter->pdev;
3315 u32 sli_intf = 0, if_type;
3316
3317 switch (pdev->device) {
3318 case BE_DEVICE_ID1:
3319 case OC_DEVICE_ID1:
3320 adapter->generation = BE_GEN2;
3321 break;
3322 case BE_DEVICE_ID2:
3323 case OC_DEVICE_ID2:
3324 case OC_DEVICE_ID5:
3325 adapter->generation = BE_GEN3;
3326 break;
3327 case OC_DEVICE_ID3:
3328 case OC_DEVICE_ID4:
3329 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3330 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3331 SLI_INTF_IF_TYPE_SHIFT;
3332
3333 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3334 if_type != 0x02) {
3335 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3336 return -EINVAL;
3337 }
3338 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3339 SLI_INTF_FAMILY_SHIFT);
3340 adapter->generation = BE_GEN3;
3341 break;
3342 default:
3343 adapter->generation = 0;
3344 }
3345 return 0;
3346 }
3347
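/* Poll the SLIPORT status register for the ready bit, waiting up to
 * 30 seconds.
 */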
3348 static int lancer_wait_ready(struct be_adapter *adapter)
3349 {
3350 #define SLIPORT_READY_TIMEOUT 30
3351 u32 sliport_status;
3352 int status = 0, i;
3353
3354 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3355 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3356 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3357 break;
3358
3359 msleep(1000);
3360 }
3361
3362 if (i == SLIPORT_READY_TIMEOUT)
3363 status = -1;
3364
3365 return status;
3366 }
3367
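/* If the SLIPORT reports an error that requires a reset, request one
 * through the SLIPORT control register and wait for the adapter to
 * become ready again.
 */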
3368 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3369 {
3370 int status;
3371 u32 sliport_status, err, reset_needed;
3372 status = lancer_wait_ready(adapter);
3373 if (!status) {
3374 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3375 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3376 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3377 if (err && reset_needed) {
3378 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3379 adapter->db + SLIPORT_CONTROL_OFFSET);
3380
3381 /* check adapter has corrected the error */
3382 status = lancer_wait_ready(adapter);
3383 sliport_status = ioread32(adapter->db +
3384 SLIPORT_STATUS_OFFSET);
3385 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3386 SLIPORT_STATUS_RN_MASK);
3387 if (status || sliport_status)
3388 status = -1;
3389 } else if (err || reset_needed) {
3390 status = -1;
3391 }
3392 }
3393 return status;
3394 }
3395
3396 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3397 {
3398 int status;
3399 u32 sliport_status;
3400
3401 if (adapter->eeh_err || adapter->ue_detected)
3402 return;
3403
3404 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3405
3406 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3407 dev_err(&adapter->pdev->dev,
3408 "Adapter in error state."
3409 "Trying to recover.\n");
3410
3411 status = lancer_test_and_set_rdy_state(adapter);
3412 if (status)
3413 goto err;
3414
3415 netif_device_detach(adapter->netdev);
3416
3417 if (netif_running(adapter->netdev))
3418 be_close(adapter->netdev);
3419
3420 be_clear(adapter);
3421
3422 adapter->fw_timeout = false;
3423
3424 status = be_setup(adapter);
3425 if (status)
3426 goto err;
3427
3428 if (netif_running(adapter->netdev)) {
3429 status = be_open(adapter->netdev);
3430 if (status)
3431 goto err;
3432 }
3433
3434 netif_device_attach(adapter->netdev);
3435
3436 dev_err(&adapter->pdev->dev,
3437 "Adapter error recovery succeeded\n");
3438 }
3439 return;
3440 err:
3441 dev_err(&adapter->pdev->dev,
3442 "Adapter error recovery failed\n");
3443 }
3444
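/* Periodic (1 second) housekeeping: check for Lancer/UE errors, fetch
 * stats from FW, update RX EQ delays and replenish starved RX queues;
 * while the interface is down only pending MCC completions are reaped.
 */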
3445 static void be_worker(struct work_struct *work)
3446 {
3447 struct be_adapter *adapter =
3448 container_of(work, struct be_adapter, work.work);
3449 struct be_rx_obj *rxo;
3450 int i;
3451
3452 if (lancer_chip(adapter))
3453 lancer_test_and_recover_fn_err(adapter);
3454
3455 be_detect_dump_ue(adapter);
3456
3457 /* when interrupts are not yet enabled, just reap any pending
3458 * mcc completions */
3459 if (!netif_running(adapter->netdev)) {
3460 int mcc_compl, status = 0;
3461
3462 mcc_compl = be_process_mcc(adapter, &status);
3463
3464 if (mcc_compl) {
3465 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3466 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3467 }
3468
3469 goto reschedule;
3470 }
3471
3472 if (!adapter->stats_cmd_sent) {
3473 if (lancer_chip(adapter))
3474 lancer_cmd_get_pport_stats(adapter,
3475 &adapter->stats_cmd);
3476 else
3477 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3478 }
3479
3480 for_all_rx_queues(adapter, rxo, i) {
3481 be_rx_eqd_update(adapter, rxo);
3482
3483 if (rxo->rx_post_starved) {
3484 rxo->rx_post_starved = false;
3485 be_post_rx_frags(rxo, GFP_KERNEL);
3486 }
3487 }
3488
3489 reschedule:
3490 adapter->work_counter++;
3491 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3492 }
3493
3494 static int __devinit be_probe(struct pci_dev *pdev,
3495 const struct pci_device_id *pdev_id)
3496 {
3497 int status = 0;
3498 struct be_adapter *adapter;
3499 struct net_device *netdev;
3500
3501 status = pci_enable_device(pdev);
3502 if (status)
3503 goto do_none;
3504
3505 status = pci_request_regions(pdev, DRV_NAME);
3506 if (status)
3507 goto disable_dev;
3508 pci_set_master(pdev);
3509
3510 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3511 if (netdev == NULL) {
3512 status = -ENOMEM;
3513 goto rel_reg;
3514 }
3515 adapter = netdev_priv(netdev);
3516 adapter->pdev = pdev;
3517 pci_set_drvdata(pdev, adapter);
3518
3519 status = be_dev_family_check(adapter);
3520 if (status)
3521 goto free_netdev;
3522
3523 adapter->netdev = netdev;
3524 SET_NETDEV_DEV(netdev, &pdev->dev);
3525
3526 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3527 if (!status) {
3528 netdev->features |= NETIF_F_HIGHDMA;
3529 } else {
3530 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3531 if (status) {
3532 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3533 goto free_netdev;
3534 }
3535 }
3536
3537 status = be_sriov_enable(adapter);
3538 if (status)
3539 goto free_netdev;
3540
3541 status = be_ctrl_init(adapter);
3542 if (status)
3543 goto disable_sriov;
3544
3545 if (lancer_chip(adapter)) {
3546 status = lancer_wait_ready(adapter);
3547 if (!status) {
3548 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3549 adapter->db + SLIPORT_CONTROL_OFFSET);
3550 status = lancer_test_and_set_rdy_state(adapter);
3551 }
3552 if (status) {
3553 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3554 goto ctrl_clean;
3555 }
3556 }
3557
3558 /* sync up with fw's ready state */
3559 if (be_physfn(adapter)) {
3560 status = be_cmd_POST(adapter);
3561 if (status)
3562 goto ctrl_clean;
3563 }
3564
3565 /* tell fw we're ready to fire cmds */
3566 status = be_cmd_fw_init(adapter);
3567 if (status)
3568 goto ctrl_clean;
3569
3570 status = be_cmd_reset_function(adapter);
3571 if (status)
3572 goto ctrl_clean;
3573
3574 status = be_stats_init(adapter);
3575 if (status)
3576 goto ctrl_clean;
3577
3578 status = be_get_config(adapter);
3579 if (status)
3580 goto stats_clean;
3581
3582 /* The INTR bit may be set in the card when probed by a kdump kernel
3583 * after a crash.
3584 */
3585 if (!lancer_chip(adapter))
3586 be_intr_set(adapter, false);
3587
3588 be_msix_enable(adapter);
3589
3590 INIT_DELAYED_WORK(&adapter->work, be_worker);
3591 adapter->rx_fc = adapter->tx_fc = true;
3592
3593 status = be_setup(adapter);
3594 if (status)
3595 goto msix_disable;
3596
3597 be_netdev_init(netdev);
3598 status = register_netdev(netdev);
3599 if (status != 0)
3600 goto unsetup;
3601
3602 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3603
3604 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3605 return 0;
3606
3607 unsetup:
3608 be_clear(adapter);
3609 msix_disable:
3610 be_msix_disable(adapter);
3611 stats_clean:
3612 be_stats_cleanup(adapter);
3613 ctrl_clean:
3614 be_ctrl_cleanup(adapter);
3615 disable_sriov:
3616 be_sriov_disable(adapter);
3617 free_netdev:
3618 free_netdev(netdev);
3619 pci_set_drvdata(pdev, NULL);
3620 rel_reg:
3621 pci_release_regions(pdev);
3622 disable_dev:
3623 pci_disable_device(pdev);
3624 do_none:
3625 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3626 return status;
3627 }
3628
3629 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3630 {
3631 struct be_adapter *adapter = pci_get_drvdata(pdev);
3632 struct net_device *netdev = adapter->netdev;
3633
3634 cancel_delayed_work_sync(&adapter->work);
3635 if (adapter->wol)
3636 be_setup_wol(adapter, true);
3637
3638 netif_device_detach(netdev);
3639 if (netif_running(netdev)) {
3640 rtnl_lock();
3641 be_close(netdev);
3642 rtnl_unlock();
3643 }
3644 be_clear(adapter);
3645
3646 be_msix_disable(adapter);
3647 pci_save_state(pdev);
3648 pci_disable_device(pdev);
3649 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3650 return 0;
3651 }
3652
3653 static int be_resume(struct pci_dev *pdev)
3654 {
3655 int status = 0;
3656 struct be_adapter *adapter = pci_get_drvdata(pdev);
3657 struct net_device *netdev = adapter->netdev;
3658
3659 netif_device_detach(netdev);
3660
3661 status = pci_enable_device(pdev);
3662 if (status)
3663 return status;
3664
3665 pci_set_power_state(pdev, 0);
3666 pci_restore_state(pdev);
3667
3668 be_msix_enable(adapter);
3669 /* tell fw we're ready to fire cmds */
3670 status = be_cmd_fw_init(adapter);
3671 if (status)
3672 return status;
3673
3674 be_setup(adapter);
3675 if (netif_running(netdev)) {
3676 rtnl_lock();
3677 be_open(netdev);
3678 rtnl_unlock();
3679 }
3680 netif_device_attach(netdev);
3681
3682 if (adapter->wol)
3683 be_setup_wol(adapter, false);
3684
3685 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3686 return 0;
3687 }
3688
3689 /*
3690 * An FLR will stop BE from DMAing any data.
3691 */
3692 static void be_shutdown(struct pci_dev *pdev)
3693 {
3694 struct be_adapter *adapter = pci_get_drvdata(pdev);
3695
3696 if (!adapter)
3697 return;
3698
3699 cancel_delayed_work_sync(&adapter->work);
3700
3701 netif_device_detach(adapter->netdev);
3702
3703 if (adapter->wol)
3704 be_setup_wol(adapter, true);
3705
3706 be_cmd_reset_function(adapter);
3707
3708 pci_disable_device(pdev);
3709 }
3710
3711 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3712 pci_channel_state_t state)
3713 {
3714 struct be_adapter *adapter = pci_get_drvdata(pdev);
3715 struct net_device *netdev = adapter->netdev;
3716
3717 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3718
3719 adapter->eeh_err = true;
3720
3721 netif_device_detach(netdev);
3722
3723 if (netif_running(netdev)) {
3724 rtnl_lock();
3725 be_close(netdev);
3726 rtnl_unlock();
3727 }
3728 be_clear(adapter);
3729
3730 if (state == pci_channel_io_perm_failure)
3731 return PCI_ERS_RESULT_DISCONNECT;
3732
3733 pci_disable_device(pdev);
3734
3735 return PCI_ERS_RESULT_NEED_RESET;
3736 }
3737
3738 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3739 {
3740 struct be_adapter *adapter = pci_get_drvdata(pdev);
3741 int status;
3742
3743 dev_info(&adapter->pdev->dev, "EEH reset\n");
3744 adapter->eeh_err = false;
3745 adapter->ue_detected = false;
3746 adapter->fw_timeout = false;
3747
3748 status = pci_enable_device(pdev);
3749 if (status)
3750 return PCI_ERS_RESULT_DISCONNECT;
3751
3752 pci_set_master(pdev);
3753 pci_set_power_state(pdev, 0);
3754 pci_restore_state(pdev);
3755
3756 /* Check if card is ok and fw is ready */
3757 status = be_cmd_POST(adapter);
3758 if (status)
3759 return PCI_ERS_RESULT_DISCONNECT;
3760
3761 return PCI_ERS_RESULT_RECOVERED;
3762 }
3763
3764 static void be_eeh_resume(struct pci_dev *pdev)
3765 {
3766 int status = 0;
3767 struct be_adapter *adapter = pci_get_drvdata(pdev);
3768 struct net_device *netdev = adapter->netdev;
3769
3770 dev_info(&adapter->pdev->dev, "EEH resume\n");
3771
3772 pci_save_state(pdev);
3773
3774 /* tell fw we're ready to fire cmds */
3775 status = be_cmd_fw_init(adapter);
3776 if (status)
3777 goto err;
3778
3779 status = be_setup(adapter);
3780 if (status)
3781 goto err;
3782
3783 if (netif_running(netdev)) {
3784 status = be_open(netdev);
3785 if (status)
3786 goto err;
3787 }
3788 netif_device_attach(netdev);
3789 return;
3790 err:
3791 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3792 }
3793
3794 static struct pci_error_handlers be_eeh_handlers = {
3795 .error_detected = be_eeh_err_detected,
3796 .slot_reset = be_eeh_reset,
3797 .resume = be_eeh_resume,
3798 };
3799
3800 static struct pci_driver be_driver = {
3801 .name = DRV_NAME,
3802 .id_table = be_dev_ids,
3803 .probe = be_probe,
3804 .remove = be_remove,
3805 .suspend = be_suspend,
3806 .resume = be_resume,
3807 .shutdown = be_shutdown,
3808 .err_handler = &be_eeh_handlers
3809 };
3810
3811 static int __init be_init_module(void)
3812 {
3813 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3814 rx_frag_size != 2048) {
3815 printk(KERN_WARNING DRV_NAME
3816 " : Module param rx_frag_size must be 2048/4096/8192."
3817 " Using 2048\n");
3818 rx_frag_size = 2048;
3819 }
3820
3821 return pci_register_driver(&be_driver);
3822 }
3823 module_init(be_init_module);
3824
3825 static void __exit be_exit_module(void)
3826 {
3827 pci_unregister_driver(&be_driver);
3828 }
3829 module_exit(be_exit_module);